From 30d1a735c43c20e53a9c1f77758a119aad99d6d8 Mon Sep 17 00:00:00 2001
From: Logan Cary
Date: Wed, 7 Aug 2024 15:54:12 -0400
Subject: [PATCH] test

---
 ...rt.py => test_008_snapshot_count_alert.py} | 0
 tests/api2/test_009_fenced.py | 9 -
 tests/api2/test_011_user.py | 722 -----
 tests/api2/test_012_directory_service_ssh.py | 64 -
 tests/api2/test_014_failover_related.py | 108 -
 tests/api2/test_015_services.py | 83 -
 tests/api2/test_020_account.py | 50 -
 tests/api2/test_030_activedirectory.py | 416 ---
 tests/api2/test_032_ad_kerberos.py | 355 ---
 tests/api2/test_035_ad_idmap.py | 375 ---
 tests/api2/test_040_ad_user_group_cache.py | 204 --
 tests/api2/test_050_alert.py | 131 -
 tests/api2/test_070_alertservice.py | 98 -
 tests/api2/test_090_boot.py | 58 -
 tests/api2/test_100_bootenv.py | 107 -
 tests/api2/test_110_certificate.py | 84 -
 tests/api2/test_120_certificateauthority.py | 17 -
 tests/api2/test_140_core.py | 58 -
 tests/api2/test_150_cronjob.py | 71 -
 tests/api2/test_190_filesystem.py | 367 ---
 tests/api2/test_200_ftp.py | 1404 ---------
 tests/api2/test_210_group.py | 173 --
 tests/api2/test_230_idmap.py | 55 -
 tests/api2/test_260_iscsi.py | 647 ----
 tests/api2/test_261_iscsi_cmd.py | 2736 -----------------
 tests/api2/test_262_iscsi_alua.py | 147 -
 tests/api2/test_275_ldap.py | 77 -
 tests/api2/test_278_freeipa.py | 106 -
 tests/api2/test_290_mail.py | 58 -
 tests/api2/test_300_nfs.py | 1789 ----------
 tests/api2/test_310_service_announcement.py | 479 ---
 tests/api2/test_330_pool_acltype.py | 63 -
 .../api2/test_341_pool_dataset_encryption.py | 1130 -------
 tests/api2/test_344_acl_templates.py | 215 --
 tests/api2/test_345_acl_nfs4.py | 946 ------
 tests/api2/test_347_posix_mode.py | 656 ----
 tests/api2/test_348_posix_acl.py | 535 ----
 .../api2/test_350_pool_dataset_quota_alert.py | 98 -
 tests/api2/test_360_pool_scrub.py | 77 -
 tests/api2/test_410_smart.py | 46 -
 tests/api2/test_420_smb.py | 449 ---
 tests/api2/test_425_smb_protocol.py | 807 -----
 tests/api2/test_426_smb_vss.py | 361 ---
 tests/api2/test_427_smb_acl.py | 293 --
 tests/api2/test_428_smb_rpc.py | 127 -
 tests/api2/test_430_smb_sharesec.py | 218 --
 tests/api2/test_435_smb_registry.py | 415 ---
 tests/api2/test_438_snapshots.py | 521 ----
 tests/api2/test_440_snmp.py | 445 ---
 tests/api2/test_450_staticroutes.py | 63 -
 tests/api2/test_470_system.py | 67 -
 tests/api2/test_475_syslog.py | 97 -
 tests/api2/test_490_system_general.py | 68 -
 tests/api2/test_500_system_ntpservers.py | 112 -
 tests/api2/test_530_ups.py | 236 --
 tests/api2/test_541_vm.py | 268 --
 tests/api2/test_550_vmware.py | 44 -
 tests/api2/test_790_update.py | 177 --
 tests/api2/test_900_docs.py | 17 -
 tests/api2/test_999_pool_dataset_unlock.py | 191 --
 tests/api2/test_account.py | 177 --
 tests/api2/test_account_duplicate_uid_gid.py | 102 -
 tests/api2/test_account_idmap.py | 44 -
 tests/api2/test_account_privilege.py | 209 --
 .../test_account_privilege_authentication.py | 204 --
 tests/api2/test_account_privilege_role.py | 187 --
 ...account_privilege_role_forbidden_fields.py | 57 -
 ...t_account_privilege_role_private_fields.py | 235 --
 tests/api2/test_account_query_roles.py | 19 -
 tests/api2/test_account_root_password.py | 61 -
 tests/api2/test_account_shell_choices.py | 135 -
 tests/api2/test_account_ssh_key.py | 79 -
 tests/api2/test_alert_classes.py | 59 -
 tests/api2/test_api_key.py | 133 -
 tests/api2/test_api_key_crud.py | 49 -
 tests/api2/test_attachment_querying.py | 35 -
 tests/api2/test_audit_alerts.py | 123 -
 tests/api2/test_audit_api_key.py | 34 -
 tests/api2/test_audit_audit.py | 174 --
 tests/api2/test_audit_basic.py | 252 --
 tests/api2/test_audit_dataset.py | 31 -
 tests/api2/test_audit_ftp.py | 58 -
 tests/api2/test_audit_iscsi.py | 576 ----
 tests/api2/test_audit_nfs.py | 129 -
 tests/api2/test_audit_permission.py | 55 -
 tests/api2/test_audit_rest.py | 205 --
 tests/api2/test_audit_smb.py | 83 -
 tests/api2/test_audit_sudo.py | 254 --
 tests/api2/test_audit_websocket.py | 372 ---
 tests/api2/test_auth_me.py | 106 -
 tests/api2/test_auth_otp.py | 43 -
 tests/api2/test_auth_token.py | 115 -
 tests/api2/test_block_hooks.py | 31 -
 tests/api2/test_boot_attach_replace_detach.py | 57 -
 tests/api2/test_boot_format.py | 13 -
 tests/api2/test_bootenv.py | 24 -
 tests/api2/test_can_access_as_user.py | 81 -
 tests/api2/test_certificate_roles.py | 41 -
 tests/api2/test_certs.py | 552 ----
 tests/api2/test_client_job.py | 36 -
 tests/api2/test_cloud_backup.py | 271 --
 tests/api2/test_cloud_sync.py | 239 --
 tests/api2/test_cloud_sync_config.py | 34 -
 tests/api2/test_cloud_sync_credentials.py | 13 -
 tests/api2/test_cloud_sync_crud.py | 88 -
 tests/api2/test_cloud_sync_custom_s3.py | 49 -
 tests/api2/test_cloud_sync_script.py | 63 -
 tests/api2/test_cloud_sync_storj.py | 81 -
 tests/api2/test_config_upload.py | 60 -
 tests/api2/test_core_bulk.py | 97 -
 tests/api2/test_crud.py | 29 -
 tests/api2/test_crud_events.py | 138 -
 ..._dataset_encryption_keys_in_replication.py | 151 -
 tests/api2/test_dataset_mount.py | 19 -
 tests/api2/test_dataset_unlock_validation.py | 50 -
 tests/api2/test_device_get_disk_names.py | 5 -
 tests/api2/test_device_get_disks_size.py | 7 -
 tests/api2/test_disk_format.py | 97 -
 tests/api2/test_disk_get_dev_size.py | 15 -
 tests/api2/test_disk_temperature.py | 120 -
 tests/api2/test_disk_wipe.py | 110 -
 tests/api2/test_disk_zfs_guid.py | 66 -
 tests/api2/test_draid.py | 97 -
 .../api2/test_draid_record_and_block_size.py | 190 --
 tests/api2/test_enable_disable_services.py | 11 -
 ...test_encrypted_dataset_services_restart.py | 73 -
 tests/api2/test_events.py | 14 -
 .../api2/test_filesystem__file_tail_follow.py | 42 -
 tests/api2/test_filesystem__put.py | 59 -
 tests/api2/test_group_utils.py | 17 -
 tests/api2/test_initshutdownscript.py | 118 -
 tests/api2/test_ipa_join.py | 116 -
 tests/api2/test_ipa_leave.py | 74 -
 tests/api2/test_iscsi.py | 70 -
 tests/api2/test_iscsi_auth_crud_roles.py | 22 -
 tests/api2/test_iscsi_auth_network.py | 190 --
 tests/api2/test_iscsi_extent_crud_roles.py | 22 -
 tests/api2/test_iscsi_global_crud_roles.py | 21 -
 tests/api2/test_iscsi_host_crud_roles.py | 26 -
 tests/api2/test_iscsi_initiator_crud_roles.py | 22 -
 tests/api2/test_iscsi_portal_crud_roles.py | 22 -
 tests/api2/test_iscsi_target_crud_roles.py | 22 -
 .../test_iscsi_targetextent_crud_roles.py | 22 -
 tests/api2/test_job_credentials.py | 18 -
 tests/api2/test_job_errno.py | 27 -
 tests/api2/test_job_events.py | 60 -
 tests/api2/test_job_lock.py | 126 -
 tests/api2/test_job_logs.py | 27 -
 tests/api2/test_job_result.py | 27 -
 tests/api2/test_keychain_ssh.py | 91 -
 tests/api2/test_localhost_ws_auth.py | 11 -
 tests/api2/test_lock.py | 67 -
 tests/api2/test_network_configuration.py | 41 -
 tests/api2/test_nfs_share_crud_roles.py | 39 -
 tests/api2/test_openapi.py | 16 -
 tests/api2/test_password_reset.py | 132 -
 tests/api2/test_pool_attach.py | 26 -
 tests/api2/test_pool_dataset_acl.py | 104 -
 tests/api2/test_pool_dataset_create.py | 12 -
 tests/api2/test_pool_dataset_details.py | 41 -
 tests/api2/test_pool_dataset_encrypted.py | 43 -
 tests/api2/test_pool_dataset_info.py | 6 -
 tests/api2/test_pool_dataset_processes.py | 14 -
 .../api2/test_pool_dataset_snapshot_count.py | 22 -
 .../api2/test_pool_dataset_track_processes.py | 83 -
 ...ool_dataset_unlock_lock_immutable_flags.py | 54 -
 .../test_pool_dataset_unlock_recursive.py | 40 -
 .../test_pool_dataset_unlock_restart_vms.py | 58 -
 tests/api2/test_pool_expand.py | 53 -
 tests/api2/test_pool_export.py | 62 -
 tests/api2/test_pool_is_upgraded.py | 34 -
 .../test_pool_is_upgraded_alert_removal.py | 40 -
 tests/api2/test_pool_remove_disk.py | 13 -
 tests/api2/test_pool_replace_disk.py | 68 -
 tests/api2/test_pool_resilver.py | 12 -
 tests/api2/test_pool_spare.py | 40 -
 tests/api2/test_port_delegates.py | 62 -
 tests/api2/test_quotas.py | 118 -
 tests/api2/test_rate_limit.py | 56 -
 tests/api2/test_replication.py | 243 --
 tests/api2/test_replication_role.py | 108 -
 tests/api2/test_replication_sudo.py | 54 -
 tests/api2/test_replication_utils.py | 31 -
 tests/api2/test_reporting_netdataweb.py | 29 -
 tests/api2/test_reporting_realtime.py | 19 -
 tests/api2/test_rest_api.py | 15 -
 tests/api2/test_rest_api_authentication.py | 152 -
 tests/api2/test_rest_api_download.py | 106 -
 tests/api2/test_rest_api_upload.py | 68 -
 tests/api2/test_rsync_ssh_authentication.py | 302 --
 tests/api2/test_run_as_user_impl.py | 43 -
 tests/api2/test_schema_private.py | 39 -
 tests/api2/test_serial_consoles.py | 40 -
 ..._sharing_service_encrypted_dataset_info.py | 105 -
 tests/api2/test_simple_share.py | 47 -
 tests/api2/test_smart_test_crud.py | 100 -
 tests/api2/test_smart_test_run.py | 64 -
 tests/api2/test_smb_client.py | 243 --
 tests/api2/test_smb_encryption.py | 133 -
 tests/api2/test_smb_groupmap.py | 73 -
 tests/api2/test_smb_share_crud_roles.py | 78 -
 tests/api2/test_snapshot_query.py | 60 -
 tests/api2/test_snapshot_task.py | 42 -
 tests/api2/test_snapshot_task_retention.py | 112 -
 tests/api2/test_snapshots.py | 56 -
 tests/api2/test_snmp_agent.py | 38 -
 tests/api2/test_system_advanced.py | 34 -
 tests/api2/test_system_dataset.py | 79 -
 .../api2/test_system_general_ui_allowlist.py | 84 -
 tests/api2/test_system_general_ui_rollback.py | 94 -
 tests/api2/test_system_settings_roles.py | 24 -
 tests/api2/test_system_vendor.py | 15 -
 tests/api2/test_truecommand_roles.py | 23 -
 tests/api2/test_tunables.py | 164 -
 tests/api2/test_twofactor_auth.py | 197 --
 tests/api2/test_ui_caching.py | 35 -
 tests/api2/test_user_ssh_password.py | 48 -
 tests/api2/test_user_truenas_admin.py | 89 -
 tests/api2/test_vm_roles.py | 75 -
 tests/api2/test_vmware.py | 190 --
 tests/api2/test_vmware_snapshot_delete.py | 69 -
 tests/api2/test_vmware_state.py | 33 -
 tests/api2/test_webui_crypto_service.py | 46 -
 tests/api2/test_zfs_dataset_list.py | 12 -
 tests/api2/test_zfs_snapshot_events.py | 74 -
 tests/api2/test_zfs_snapshot_hold.py | 34 -
 tests/api2/test_zpool_capacity_alert.py | 54 -
 227 files changed, 33688 deletions(-)
 rename tests/api2/{test_snapshot_count_alert.py => test_008_snapshot_count_alert.py} (100%)
 delete mode 100644 tests/api2/test_009_fenced.py
 delete mode 100644 tests/api2/test_011_user.py
 delete mode 100644 tests/api2/test_012_directory_service_ssh.py
 delete mode 100644 tests/api2/test_014_failover_related.py
 delete mode 100644 tests/api2/test_015_services.py
 delete mode 100644 tests/api2/test_020_account.py
 delete mode 100644 tests/api2/test_030_activedirectory.py
 delete mode 100644 tests/api2/test_032_ad_kerberos.py
 delete mode 100644 tests/api2/test_035_ad_idmap.py
 delete mode 100644 tests/api2/test_040_ad_user_group_cache.py
 delete mode 100644 tests/api2/test_050_alert.py
 delete mode 100644 tests/api2/test_070_alertservice.py
 delete mode 100644 tests/api2/test_090_boot.py
 delete mode 100644 tests/api2/test_100_bootenv.py
 delete mode 100644 tests/api2/test_110_certificate.py
 delete mode 100644 tests/api2/test_120_certificateauthority.py
 delete mode 100644 tests/api2/test_140_core.py
 delete mode 100644 tests/api2/test_150_cronjob.py
 delete mode 100644 tests/api2/test_190_filesystem.py
 delete mode 100644 tests/api2/test_200_ftp.py
 delete mode 100644 tests/api2/test_210_group.py
 delete mode 100644 tests/api2/test_230_idmap.py
 delete mode 100644 tests/api2/test_260_iscsi.py
 delete mode 100644 tests/api2/test_261_iscsi_cmd.py
 delete mode 100644 tests/api2/test_262_iscsi_alua.py
 delete mode 100644 tests/api2/test_275_ldap.py
 delete mode 100644 tests/api2/test_278_freeipa.py
 delete mode 100644 tests/api2/test_290_mail.py
 delete mode 100644 tests/api2/test_300_nfs.py
 delete mode 100644 tests/api2/test_310_service_announcement.py
 delete mode 100644 tests/api2/test_330_pool_acltype.py
 delete mode 100644 tests/api2/test_341_pool_dataset_encryption.py
 delete mode 100644 tests/api2/test_344_acl_templates.py
 delete mode 100644 tests/api2/test_345_acl_nfs4.py
 delete mode 100644 tests/api2/test_347_posix_mode.py
 delete mode 100644 tests/api2/test_348_posix_acl.py
 delete mode 100644 tests/api2/test_350_pool_dataset_quota_alert.py
 delete mode 100644 tests/api2/test_360_pool_scrub.py
 delete mode 100644 tests/api2/test_410_smart.py
 delete mode 100644 tests/api2/test_420_smb.py
 delete mode 100644 tests/api2/test_425_smb_protocol.py
 delete mode 100644 tests/api2/test_426_smb_vss.py
 delete mode 100644 tests/api2/test_427_smb_acl.py
 delete mode 100644 tests/api2/test_428_smb_rpc.py
 delete mode 100644 tests/api2/test_430_smb_sharesec.py
 delete mode 100644 tests/api2/test_435_smb_registry.py
 delete mode 100644 tests/api2/test_438_snapshots.py
 delete mode 100644 tests/api2/test_440_snmp.py
 delete mode 100644 tests/api2/test_450_staticroutes.py
 delete mode 100644 tests/api2/test_470_system.py
 delete mode 100644 tests/api2/test_475_syslog.py
 delete mode 100644 tests/api2/test_490_system_general.py
 delete mode 100644 tests/api2/test_500_system_ntpservers.py
 delete mode 100644 tests/api2/test_530_ups.py
 delete mode 100644 tests/api2/test_541_vm.py
 delete mode 100644 tests/api2/test_550_vmware.py
 delete mode 100644 tests/api2/test_790_update.py
 delete mode 100644 tests/api2/test_900_docs.py
 delete mode 100644 tests/api2/test_999_pool_dataset_unlock.py
 delete mode 100644 tests/api2/test_account.py
 delete mode 100644 tests/api2/test_account_duplicate_uid_gid.py
 delete mode 100644 tests/api2/test_account_idmap.py
 delete mode 100644 tests/api2/test_account_privilege.py
 delete mode 100644 tests/api2/test_account_privilege_authentication.py
 delete mode 100644 tests/api2/test_account_privilege_role.py
 delete mode 100644 tests/api2/test_account_privilege_role_forbidden_fields.py
 delete mode 100644 tests/api2/test_account_privilege_role_private_fields.py
 delete mode 100644 tests/api2/test_account_query_roles.py
 delete mode 100644 tests/api2/test_account_root_password.py
 delete mode 100644 tests/api2/test_account_shell_choices.py
 delete mode 100644 tests/api2/test_account_ssh_key.py
 delete mode 100644 tests/api2/test_alert_classes.py
 delete mode 100644 tests/api2/test_api_key.py
 delete mode 100644 tests/api2/test_api_key_crud.py
 delete mode 100644 tests/api2/test_attachment_querying.py
 delete mode 100644 tests/api2/test_audit_alerts.py
 delete mode 100644 tests/api2/test_audit_api_key.py
 delete mode 100644 tests/api2/test_audit_audit.py
 delete mode 100644 tests/api2/test_audit_basic.py
 delete mode 100644 tests/api2/test_audit_dataset.py
 delete mode 100644 tests/api2/test_audit_ftp.py
 delete mode 100644 tests/api2/test_audit_iscsi.py
 delete mode 100644 tests/api2/test_audit_nfs.py
 delete mode 100644 tests/api2/test_audit_permission.py
 delete mode 100644 tests/api2/test_audit_rest.py
 delete mode 100644 tests/api2/test_audit_smb.py
 delete mode 100644 tests/api2/test_audit_sudo.py
 delete mode 100644 tests/api2/test_audit_websocket.py
 delete mode 100644 tests/api2/test_auth_me.py
 delete mode 100644 tests/api2/test_auth_otp.py
 delete mode 100644 tests/api2/test_auth_token.py
 delete mode 100644 tests/api2/test_block_hooks.py
 delete mode 100644 tests/api2/test_boot_attach_replace_detach.py
 delete mode 100644 tests/api2/test_boot_format.py
 delete mode 100644 tests/api2/test_bootenv.py
 delete mode 100644 tests/api2/test_can_access_as_user.py
 delete mode 100644 tests/api2/test_certificate_roles.py
 delete mode 100644 tests/api2/test_certs.py
 delete mode 100644 tests/api2/test_client_job.py
 delete mode 100644 tests/api2/test_cloud_backup.py
 delete mode 100644 tests/api2/test_cloud_sync.py
 delete mode 100644 tests/api2/test_cloud_sync_config.py
 delete mode 100644 tests/api2/test_cloud_sync_credentials.py
 delete mode 100644 tests/api2/test_cloud_sync_crud.py
 delete mode 100644 tests/api2/test_cloud_sync_custom_s3.py
 delete mode 100644 tests/api2/test_cloud_sync_script.py
 delete mode 100644 tests/api2/test_cloud_sync_storj.py
 delete mode 100644 tests/api2/test_config_upload.py
 delete mode 100644 tests/api2/test_core_bulk.py
 delete mode 100644 tests/api2/test_crud.py
 delete mode 100644 tests/api2/test_crud_events.py
 delete mode 100644 tests/api2/test_dataset_encryption_keys_in_replication.py
 delete mode 100644 tests/api2/test_dataset_mount.py
 delete mode 100644 tests/api2/test_dataset_unlock_validation.py
 delete mode 100644 tests/api2/test_device_get_disk_names.py
 delete mode 100644 tests/api2/test_device_get_disks_size.py
 delete mode 100644 tests/api2/test_disk_format.py
 delete mode 100644 tests/api2/test_disk_get_dev_size.py
 delete mode 100644 tests/api2/test_disk_temperature.py
 delete mode 100644 tests/api2/test_disk_wipe.py
 delete mode 100644 tests/api2/test_disk_zfs_guid.py
 delete mode 100644 tests/api2/test_draid.py
 delete mode 100644 tests/api2/test_draid_record_and_block_size.py
 delete mode 100644 tests/api2/test_enable_disable_services.py
 delete mode 100644 tests/api2/test_encrypted_dataset_services_restart.py
 delete mode 100644 tests/api2/test_events.py
 delete mode 100644 tests/api2/test_filesystem__file_tail_follow.py
 delete mode 100644 tests/api2/test_filesystem__put.py
 delete mode 100644 tests/api2/test_group_utils.py
 delete mode 100644 tests/api2/test_initshutdownscript.py
 delete mode 100644 tests/api2/test_ipa_join.py
 delete mode 100644 tests/api2/test_ipa_leave.py
 delete mode 100644 tests/api2/test_iscsi.py
 delete mode 100644 tests/api2/test_iscsi_auth_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_auth_network.py
 delete mode 100644 tests/api2/test_iscsi_extent_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_global_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_host_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_initiator_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_portal_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_target_crud_roles.py
 delete mode 100644 tests/api2/test_iscsi_targetextent_crud_roles.py
 delete mode 100644 tests/api2/test_job_credentials.py
 delete mode 100644 tests/api2/test_job_errno.py
 delete mode 100644 tests/api2/test_job_events.py
 delete mode 100644 tests/api2/test_job_lock.py
 delete mode 100644 tests/api2/test_job_logs.py
 delete mode 100644 tests/api2/test_job_result.py
 delete mode 100644 tests/api2/test_keychain_ssh.py
 delete mode 100644 tests/api2/test_localhost_ws_auth.py
 delete mode 100644 tests/api2/test_lock.py
 delete mode 100644 tests/api2/test_network_configuration.py
 delete mode 100644 tests/api2/test_nfs_share_crud_roles.py
 delete mode 100644 tests/api2/test_openapi.py
 delete mode 100644 tests/api2/test_password_reset.py
 delete mode 100644 tests/api2/test_pool_attach.py
 delete mode 100644 tests/api2/test_pool_dataset_acl.py
 delete mode 100644 tests/api2/test_pool_dataset_create.py
 delete mode 100644 tests/api2/test_pool_dataset_details.py
 delete mode 100644 tests/api2/test_pool_dataset_encrypted.py
 delete mode 100644 tests/api2/test_pool_dataset_info.py
 delete mode 100644 tests/api2/test_pool_dataset_processes.py
 delete mode 100644 tests/api2/test_pool_dataset_snapshot_count.py
 delete mode 100644 tests/api2/test_pool_dataset_track_processes.py
 delete mode 100644 tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py
 delete mode 100644 tests/api2/test_pool_dataset_unlock_recursive.py
 delete mode 100644 tests/api2/test_pool_dataset_unlock_restart_vms.py
 delete mode 100644 tests/api2/test_pool_expand.py
 delete mode 100644 tests/api2/test_pool_export.py
 delete mode 100644 tests/api2/test_pool_is_upgraded.py
 delete mode 100644 tests/api2/test_pool_is_upgraded_alert_removal.py
 delete mode 100644 tests/api2/test_pool_remove_disk.py
 delete mode 100644 tests/api2/test_pool_replace_disk.py
 delete mode 100644 tests/api2/test_pool_resilver.py
 delete mode 100644 tests/api2/test_pool_spare.py
 delete mode 100644 tests/api2/test_port_delegates.py
 delete mode 100644 tests/api2/test_quotas.py
 delete mode 100644 tests/api2/test_rate_limit.py
 delete mode 100644 tests/api2/test_replication.py
 delete mode 100644 tests/api2/test_replication_role.py
 delete mode 100644 tests/api2/test_replication_sudo.py
 delete mode 100644 tests/api2/test_replication_utils.py
 delete mode 100644 tests/api2/test_reporting_netdataweb.py
 delete mode 100644 tests/api2/test_reporting_realtime.py
 delete mode 100644 tests/api2/test_rest_api.py
 delete mode 100644 tests/api2/test_rest_api_authentication.py
 delete mode 100644 tests/api2/test_rest_api_download.py
 delete mode 100644 tests/api2/test_rest_api_upload.py
 delete mode 100644 tests/api2/test_rsync_ssh_authentication.py
 delete mode 100644 tests/api2/test_run_as_user_impl.py
 delete mode 100644 tests/api2/test_schema_private.py
 delete mode 100644 tests/api2/test_serial_consoles.py
 delete mode 100644 tests/api2/test_sharing_service_encrypted_dataset_info.py
 delete mode 100644 tests/api2/test_simple_share.py
 delete mode 100644 tests/api2/test_smart_test_crud.py
 delete mode 100644 tests/api2/test_smart_test_run.py
 delete mode 100644 tests/api2/test_smb_client.py
 delete mode 100644 tests/api2/test_smb_encryption.py
 delete mode 100644 tests/api2/test_smb_groupmap.py
 delete mode 100644 tests/api2/test_smb_share_crud_roles.py
 delete mode 100644 tests/api2/test_snapshot_query.py
 delete mode 100644 tests/api2/test_snapshot_task.py
 delete mode 100644 tests/api2/test_snapshot_task_retention.py
 delete mode 100644 tests/api2/test_snapshots.py
 delete mode 100644 tests/api2/test_snmp_agent.py
 delete mode 100644 tests/api2/test_system_advanced.py
 delete mode 100644 tests/api2/test_system_dataset.py
 delete mode 100644 tests/api2/test_system_general_ui_allowlist.py
 delete mode 100644 tests/api2/test_system_general_ui_rollback.py
 delete mode 100644 tests/api2/test_system_settings_roles.py
 delete mode 100644 tests/api2/test_system_vendor.py
 delete mode 100644 tests/api2/test_truecommand_roles.py
 delete mode 100644 tests/api2/test_tunables.py
 delete mode 100644 tests/api2/test_twofactor_auth.py
 delete mode 100644 tests/api2/test_ui_caching.py
 delete mode 100644 tests/api2/test_user_ssh_password.py
 delete mode 100644 tests/api2/test_user_truenas_admin.py
 delete mode 100644 tests/api2/test_vm_roles.py
 delete mode 100644 tests/api2/test_vmware.py
 delete mode 100644 tests/api2/test_vmware_snapshot_delete.py
 delete mode 100644 tests/api2/test_vmware_state.py
 delete mode 100644 tests/api2/test_webui_crypto_service.py
 delete mode 100644 tests/api2/test_zfs_dataset_list.py
 delete mode 100644 tests/api2/test_zfs_snapshot_events.py
 delete mode 100644 tests/api2/test_zfs_snapshot_hold.py
 delete mode 100644 tests/api2/test_zpool_capacity_alert.py

diff --git a/tests/api2/test_snapshot_count_alert.py b/tests/api2/test_008_snapshot_count_alert.py
similarity index 100%
rename from tests/api2/test_snapshot_count_alert.py
rename to tests/api2/test_008_snapshot_count_alert.py
diff --git a/tests/api2/test_009_fenced.py b/tests/api2/test_009_fenced.py
deleted file mode 100644
index 6d2ebccabc007..0000000000000
--- a/tests/api2/test_009_fenced.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import pytest
-
-from auto_config import ha
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.skipif(not ha, reason='HA only test')
-def test_01_verify_fenced_is_running():
-    assert call('failover.fenced.run_info')['running']
diff --git a/tests/api2/test_011_user.py b/tests/api2/test_011_user.py
deleted file mode 100644
index 32c88c8d76e23..0000000000000
--- a/tests/api2/test_011_user.py
+++ /dev/null
@@ -1,722 +0,0 @@
-import contextlib
-import dataclasses
-import os
-import time
-import stat
-
-import pytest
-from pytest_dependency import depends
-
-from truenas_api_client import ClientException
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user as user_asset
-from middlewared.test.integration.assets.pool import dataset as dataset_asset
-from middlewared.test.integration.utils import call, ssh
-
-from functions import SSH_TEST, wait_on_job
-from auto_config import pool_name, password, user
-
-SHELL = '/usr/bin/bash'
-VAR_EMPTY = '/var/empty'
-ROOT_GROUP = 'root'
-DEFAULT_HOMEDIR_OCTAL = 0o40700
-SMB_CONFIGURED_SENTINEL = '/var/run/samba/.configured'
-
-
-@dataclasses.dataclass
-class HomeAssets:
-    HOME_FILES = {
-        'depends_name': '',
-        'files': {
-            '~/': oct(DEFAULT_HOMEDIR_OCTAL),
-            '~/.profile': '0o100644',
-            '~/.ssh': '0o40700',
-            '~/.ssh/authorized_keys': '0o100600',
-        }
-    }
-    Dataset01 = {
-        'depends_name': 'HOME_DS_CREATED',
-        'create_payload': {
-            'name': f'{pool_name}/test_homes',
-            'share_type': 'SMB',
-            'acltype': 'NFSV4',
-            'aclmode': 'RESTRICTED'
-        },
-        'home_acl': [
-            {
-                "tag": "owner@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "FULL_CONTROL"},
-                "flags": {"BASIC": "INHERIT"}
-            },
-            {
-                "tag": "group@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "FULL_CONTROL"},
-                "flags": {"BASIC": "INHERIT"}
-            },
-            {
-                "tag": "everyone@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "TRAVERSE"},
-                "flags": {"BASIC": "NOINHERIT"}
-            },
-        ],
-        'new_home': 'new_home',
-    }
-
-
-@dataclasses.dataclass
-class UserAssets:
-    TestUser01 = {
-        'depends_name': 'user_01',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'testuser',
-            'full_name': 'Test User',
-            'group_create': True,
-            'password': 'test1234',
-            'uid': None,
-            'smb': False,
-            'shell': SHELL
-        }
-    }
-    TestUser02 = {
-        'depends_name': 'user_02',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'testuser2',
-            'full_name': 'Test User2',
-            'group_create': True,
-            'password': 'test1234',
-            'uid': None,
-            'shell': SHELL,
-            'sshpubkey': 'canary',
-            'home': f'/mnt/{HomeAssets.Dataset01["create_payload"]["name"]}',
-            'home_mode': f'{stat.S_IMODE(DEFAULT_HOMEDIR_OCTAL):03o}',
-            'home_create': True,
-        },
-        'filename': 'testfile_01',
-    }
-    ShareUser01 = {
-        'depends_name': 'share_user_01',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'shareuser',
-            'full_name': 'Share User',
-            'group_create': True,
-            'groups': [],
-            'password': 'testing',
-            'uid': None,
-            'shell': SHELL
-        }
-    }
-
-
-def check_config_file(file_name, expected_line):
-    results = SSH_TEST(f'cat {file_name}', user, password)
-    assert results['result'], results['output']
-    assert expected_line in results['stdout'].splitlines(), results['output']
-
-
-@contextlib.contextmanager
-def create_user_with_dataset(ds_info, user_info):
-    with dataset_asset(ds_info['name'], ds_info.get('options', []), **ds_info.get('kwargs', {})) as ds:
-        if 'path' in user_info:
-            user_info['payload']['home'] = os.path.join(f'/mnt/{ds}', user_info['path'])
-
-        user_id = None
-        try:
-            user_id = call('user.create', user_info['payload'])
-            yield call('user.query', [['id', '=', user_id]], {'get': True})
-        finally:
-            if user_id is not None:
-                call('user.delete', user_id, {"delete_group": True})
-
-
-@pytest.mark.dependency(name=UserAssets.TestUser01['depends_name'])
-def test_001_create_and_verify_testuser():
-    """
-    Test for basic user creation. In this case 'smb' is disabled to bypass
-    passdb-related code. This is because the passdb add relies on users existing
-    in the passwd database, and errors during user creation would get masked as
-    passdb errors.
- """ - UserAssets.TestUser01['create_payload']['uid'] = call('user.get_next_uid') - call('user.create', UserAssets.TestUser01['create_payload']) - username = UserAssets.TestUser01['create_payload']['username'] - qry = call( - 'user.query', - [['username', '=', username]], - {'get': True, 'extra': {'additional_information': ['SMB']}} - ) - UserAssets.TestUser01['query_response'].update(qry) - - # verify basic info - for key in ('username', 'full_name', 'shell'): - assert qry[key] == UserAssets.TestUser01['create_payload'][key] - - # verify various /etc files were updated - for f in ( - { - 'file': '/etc/shadow', - 'value': f'{username}:{qry["unixhash"]}:18397:0:99999:7:::' - }, - { - 'file': '/etc/passwd', - 'value': f'{username}:x:{qry["uid"]}:{qry["group"]["bsdgrp_gid"]}:{qry["full_name"]}:{qry["home"]}:{qry["shell"]}' - }, - { - 'file': '/etc/group', - 'value': f'{qry["group"]["bsdgrp_group"]}:x:{qry["group"]["bsdgrp_gid"]}:' - } - ): - check_config_file(f['file'], f['value']) - - # verify password doesn't leak to middlewared.log - # we do this inside the create and verify function - # because this is severe enough problem that we should - # just "fail" at this step so it sets off a bunch of - # red flags in the CI - results = SSH_TEST( - f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log', - user, password - ) - assert results['result'] is False, str(results['output']) - - # non-smb users shouldn't show up in smb's passdb - assert qry['sid'] is None - - -def test_002_verify_user_exists_in_pwd(request): - """ - get_user_obj is a wrapper around the pwd module. - This check verifies that the user is _actually_ created. - """ - depends(request, [UserAssets.TestUser01['depends_name']]) - pw = call( - 'user.get_user_obj', - {'username': UserAssets.TestUser01['create_payload']['username'], 'sid_info': True} - ) - UserAssets.TestUser01['get_user_obj_response'].update(pw) - - # Verify pwd info - assert pw['pw_uid'] == UserAssets.TestUser01['query_response']['uid'] - assert pw['pw_shell'] == UserAssets.TestUser01['query_response']['shell'] - assert pw['pw_gecos'] == UserAssets.TestUser01['query_response']['full_name'] - assert pw['pw_dir'] == VAR_EMPTY - - # At this point, we're not an SMB user - assert pw['sid'] is not None - assert pw['source'] == 'LOCAL' - assert pw['local'] is True - - -def test_003_get_next_uid_again(request): - """user.get_next_uid should always return a unique uid""" - depends(request, [UserAssets.TestUser01['depends_name']]) - assert call('user.get_next_uid') != UserAssets.TestUser01['create_payload']['uid'] - - -def test_004_update_and_verify_user_groups(request): - """Add the user to the root users group""" - depends(request, [UserAssets.TestUser01['depends_name']]) - root_group_info = call( - 'group.query', [['group', '=', ROOT_GROUP]], {'get': True} - ) - call( - 'user.update', - UserAssets.TestUser01['query_response']['id'], - {'groups': [root_group_info['id']]} - ) - - grouplist = call( - 'user.get_user_obj', - {'username': UserAssets.TestUser01['create_payload']['username'], 'get_groups': True} - )['grouplist'] - assert root_group_info['gid'] in grouplist - - -@pytest.mark.dependency(name='SMB_CONVERT') -def test_005_convert_non_smbuser_to_smbuser(request): - depends(request, [UserAssets.TestUser01['depends_name']]) - with pytest.raises(ValidationErrors): - """ - SMB auth for local users relies on a stored NT hash. We only generate this hash - for SMB users. 
-        for SMB users. This means that converting from non-SMB to SMB requires
-        re-submitting the password so that we can generate the required hash. If the
-        payload is submitted without a password, then a validation error _must_ be raised.
-        """
-        call('user.update', UserAssets.TestUser01['query_response']['id'], {'smb': True})
-
-    rv = call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'smb': True, 'password': UserAssets.TestUser01['create_payload']['password']}
-    )
-    assert rv
-    # TODO: why sleep here?
-    time.sleep(2)
-
-    # verify the converted smb user doesn't leak the password
-    results = SSH_TEST(
-        f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-
-def test_006_verify_converted_smbuser_passdb_entry_exists(request):
-    """
-    At this point the non-SMB user has been converted to an SMB user. Verify
-    that a passdb entry was appropriately generated.
-    """
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser01['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    assert qry
-    assert qry['sid']
-
-
-def test_007_add_smbuser_to_sudoers(request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    username = UserAssets.TestUser01['create_payload']['username']
-    # all sudo commands
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': ['ALL'], 'sudo_commands_nopasswd': []}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) ALL")
-
-    # all sudo commands, no password
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': [], 'sudo_commands_nopasswd': ['ALL']}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) NOPASSWD: ALL")
-
-    # all sudo commands and all sudo commands no password
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': ['ALL'], 'sudo_commands_nopasswd': ['ALL']}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) ALL, NOPASSWD: ALL")
-
-
-def test_008_disable_smb_and_password(request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    username = UserAssets.TestUser01['create_payload']['username']
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'password_disabled': True, 'smb': False}
-    )
-    check_config_file('/etc/shadow', f'{username}:*:18397:0:99999:7:::')
-
-
-@pytest.mark.parametrize('username', [UserAssets.TestUser01['create_payload']['username']])
-def test_009_delete_user(username, request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    # delete the user first
-    call(
-        'user.delete',
-        UserAssets.TestUser01['query_response']['id'],
-        {'delete_group': True}
-    )
-    assert not call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser01['query_response']['username']]]
-    )
-
-
-# FIXME: why is this being called here randomly in the middle of this test? And why are we using REST?
-# def test_25_has_local_administrator_set_up(request):
-    # depends(request, ["user_02", "user_01"])
-    # assert GET('/user/has_local_administrator_set_up/', anonymous=True).json() is True
-
-
-@pytest.mark.dependency(name=UserAssets.ShareUser01['depends_name'])
-def test_020_create_and_verify_shareuser():
-    UserAssets.ShareUser01['create_payload']['uid'] = call('user.get_next_uid')
-    UserAssets.ShareUser01['create_payload']['groups'].append(
-        call('group.query', [['group', '=', ROOT_GROUP]], {'get': True})['id']
-    )
-
-    call('user.create', UserAssets.ShareUser01['create_payload'])
-    qry = call('user.query', [['username', '=', UserAssets.ShareUser01['create_payload']['username']]], {'get': True})
-    UserAssets.ShareUser01['query_response'].update(qry)
-
-    # verify basic info
-    for key in ('username', 'full_name', 'shell'):
-        assert qry[key] == UserAssets.ShareUser01['create_payload'][key]
-
-    # verify the password doesn't leak to middlewared.log
-    # we do this inside the create-and-verify function
-    # because this is a severe enough problem that we should
-    # just "fail" at this step so it sets off a bunch of
-    # red flags in the CI
-    results = SSH_TEST(
-        f'grep -R {UserAssets.ShareUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-
-@pytest.mark.dependency(name=UserAssets.TestUser02['depends_name'])
-def test_031_create_user_with_homedir(request):
-    """Create a zfs dataset to be used as a home directory for a
-    local user. The SMB share_type is selected for this test
-    so that we verify that the ACL is being stripped properly from the
-    newly-created home directory."""
-    # create the dataset
-    call('pool.dataset.create', HomeAssets.Dataset01['create_payload'])
-    call(
-        'pool.dataset.permission',
-        HomeAssets.Dataset01['create_payload']['name'],
-        {'acl': HomeAssets.Dataset01['home_acl']},
-        job=True
-    )
-    # now create the user
-    UserAssets.TestUser02['create_payload']['uid'] = call('user.get_next_uid')
-    call('user.create', UserAssets.TestUser02['create_payload'])
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    UserAssets.TestUser02['query_response'].update(qry)
-
-    # verify basic info
-    for key in ('username', 'full_name', 'shell'):
-        assert qry[key] == UserAssets.TestUser02['create_payload'][key]
-
-    # verify the password doesn't leak to middlewared.log
-    # we do this here because this is a severe enough
-    # problem that we should just "fail" at this step
-    # so it sets off a bunch of red flags in the CI
-    results = SSH_TEST(
-        f'grep -R {UserAssets.TestUser02["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-    pw = call(
-        'user.get_user_obj',
-        {'username': UserAssets.TestUser02['create_payload']['username'], 'sid_info': True}
-    )
-    UserAssets.TestUser02['get_user_obj_response'].update(pw)
-
-    # verify pwd
-    assert pw['pw_dir'] == os.path.join(
-        UserAssets.TestUser02['create_payload']['home'], UserAssets.TestUser02['create_payload']['username']
-    )
-    assert pw['pw_name'] == UserAssets.TestUser02['query_response']['username']
-    assert pw['pw_uid'] == UserAssets.TestUser02['query_response']['uid']
-    assert pw['pw_shell'] == UserAssets.TestUser02['query_response']['shell']
-    assert pw['pw_gecos'] == UserAssets.TestUser02['query_response']['full_name']
-    assert pw['sid'] is not None
-    assert pw['source'] == 'LOCAL'
-    assert pw['local'] is True
-
-    # verify smb user passdb entry
-    assert qry['sid']
-
-    # verify homedir acl is stripped
-    st_info = call('filesystem.stat', UserAssets.TestUser02['query_response']['home'])
-    assert st_info['acl'] is False
-
-
-def test_035_check_file_perms_in_homedir(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-    home_path = UserAssets.TestUser02['query_response']['home']
-    for file, mode in HomeAssets.HOME_FILES['files'].items():
-        st_info = call('filesystem.stat', os.path.join(home_path, file.removeprefix('~/')))
-        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-
-def test_036_create_testfile_in_homedir(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-    filename = UserAssets.TestUser02['filename']
-    filepath = f'{UserAssets.TestUser02["query_response"]["home"]}/{filename}'
-    results = SSH_TEST(
-        f'touch {filepath}; chown {UserAssets.TestUser01["query_response"]["uid"]} {filepath}',
-        user, password
-    )
-    assert results['result'] is True, results['output']
-    assert call('filesystem.stat', filepath)
-
-
-@pytest.mark.dependency(name="HOMEDIR2_EXISTS")
-def test_037_move_homedir_to_new_directory(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-
-    # Validation of autocreation of homedir during path update
-    with dataset_asset('temp_dataset_for_home') as ds:
-        new_home = os.path.join('/mnt', ds)
-        call(
-            'user.update',
-            UserAssets.TestUser02['query_response']['id'],
-            {'home': new_home, 'home_create': True}
-        )
-
-        filters = [['method', '=', 'user.do_home_copy']]
-        opts = {'get': True, 'order_by': ['-id']}
-        move_job_timeout = 300  # 5 mins
-        move_job1 = call('core.get_jobs', filters, opts)
-        assert move_job1
-        rv = wait_on_job(move_job1['id'], move_job_timeout)
-        assert rv['state'] == 'SUCCESS', f'JOB: {move_job1!r}, RESULT: {str(rv["results"])}'
-
-        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-        # now kick the can down the road to the root of our pool
-        new_home = os.path.join('/mnt', pool_name)
-        call(
-            'user.update',
-            UserAssets.TestUser02['query_response']['id'],
-            {'home': new_home, 'home_create': True}
-        )
-
-        move_job2 = call('core.get_jobs', filters, opts)
-        assert move_job2
-        assert move_job1['id'] != move_job2['id']
-        rv = wait_on_job(move_job2['id'], move_job_timeout)
-        assert rv['state'] == 'SUCCESS', f'JOB: {move_job2!r}, RESULT: {str(rv["results"])}'
-
-        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-
-def test_038_change_homedir_to_existing_path(request):
-    depends(request, [UserAssets.ShareUser01['depends_name'], UserAssets.TestUser01['depends_name']])
-    # Manually create a new home dir
-    new_home = os.path.join(
-        '/mnt',
-        HomeAssets.Dataset01['create_payload']['name'],
-        HomeAssets.Dataset01['new_home']
-    )
-    results = SSH_TEST(f'mkdir {new_home}', user, password)
-    assert results['result'] is True, results['output']
-
-    # Move the homedir to existing dir
-    call(
-        'user.update',
-        UserAssets.TestUser02['query_response']['id'],
-        {'home': new_home}
-    )
-    filters = [['method', '=', 'user.do_home_copy']]
-    opts = {'get': True, 'order_by': ['-id']}
-    move_job_timeout = 300  # 5 mins
-    home_move_job = call('core.get_jobs', filters, opts)
-    rv = wait_on_job(home_move_job['id'], move_job_timeout)
-    assert rv['state'] == 'SUCCESS', str(rv['results'])
-
-    # verify files in the homedir that were moved are what we expect
-    for file, mode in HomeAssets.HOME_FILES['files'].items():
-        st_info = call('filesystem.stat', os.path.join(new_home, file.removeprefix("~/")))
-        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-    # verify the specific file that existed in the previous homedir location was moved over
-    # NOTE: this file was created in test_036
-    assert call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['filename']))
-
-
-def test_041_lock_smb_user(request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'locked': True})
-    username = UserAssets.TestUser02['create_payload']['username']
-    check_config_file('/etc/shadow', f'{username}:!:18397:0:99999:7:::')
-
-    for entry in call('smb.passdb_list', True):
-        if entry['Unix username'] == username:
-            my_entry = entry
-            break
-    else:
-        assert False, f'{username!r} not found in smb.passdb_list'
-
-    assert my_entry["Account Flags"] == "[DU ]", str(my_entry)  # D = disabled, U = user account
-
-
-def test_042_disable_smb_user(request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'smb': False})
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    assert qry
-    assert qry['sid'] is None
-
-
-def test_043_raise_validation_error_on_homedir_collision(request):
-    """
-    Verify that a validation error is raised if the homedir collides with an existing one.
-    """
-    depends(request, ['HOMEDIR2_EXISTS', UserAssets.TestUser02['depends_name']], scope='session')
-    # NOTE: this was used in test_038
-    existing_home = os.path.join(
-        '/mnt',
-        HomeAssets.Dataset01['create_payload']['name'],
-        HomeAssets.Dataset01['new_home']
-    )
-    with pytest.raises(ValidationErrors):
-        call(
-            'user.update',
-            UserAssets.ShareUser01['query_response']['id'],
-            {'home': existing_home}
-        )
-
-
-@pytest.mark.parametrize('username', [UserAssets.TestUser02['create_payload']['username']])
-def test_046_delete_homedir_user(username, request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    # delete the user first
-    assert call(
-        'user.delete',
-        UserAssets.TestUser02['query_response']['id']
-    )
-
-    # now clean up the dataset that was used as the homedir
-    assert call(
-        'pool.dataset.delete',
-        UserAssets.TestUser02['create_payload']['home'].removeprefix('/mnt/')
-    )
-
-
-def test_050_verify_no_builtin_smb_users(request):
-    """
-    We have builtin SMB groups, but should have no builtin
-    users. Failure here may indicate an issue with the builtin user
-    synchronization code in middleware. Failure to catch this
-    may lead to accidentally granting SMB access to builtin
-    accounts.
-    """
-    qry = call('user.query', [['builtin', '=', True], ['smb', '=', True]], {'count': True})
-    assert qry == 0
-
-
-def test_058_create_new_user_knownfails(request):
-    """
-    Specifying an existing path without home_create should
-    succeed and set the mode to the desired value.
- """ - ds = {'pool': pool_name, 'name': 'user_test_exising_home_path'} - user_info = { - 'username': 't1', - "full_name": 'T1', - 'group_create': True, - 'password': 'test1234', - 'home_mode': '770' - } - with create_user_with_dataset(ds, {'payload': user_info, 'path': ''}) as user: - results = call('filesystem.stat', user['home']) - assert results['acl'] is False - assert f'{stat.S_IMODE(results["mode"]):03o}' == '770' - - # Attempting to repeat the same with new user should - # fail (no users may share same home path) - user2 = { - 'username': 't2', - 'full_name': 't2', - 'group_create': True, - 'password': 'test1234', - 'home': user['home'] - } - with pytest.raises(ValidationErrors): - # Attempting to repeat the same with new user should - # fail (no users may share same home path) - call('user.create', user2) - - with pytest.raises(ValidationErrors): - # Attempting to put homedir in subdirectory of existing homedir - # should also rase validation error - user2.update({'home_create': True}) - call('user.create', user2) - - with pytest.raises(ValidationErrors): - # Attempting to create a user with non-existing path - user2.update({'home': os.path.join(user2['home'], 'canary')}) - call('user.create', user2) - - -def test_059_create_user_ro_dataset(request): - with dataset_asset('ro_user_ds', {'readonly': 'ON'}) as ds: - with pytest.raises(ValidationErrors): - call('user.create', { - 'username': 't1', - 'full_name': 'T1', - 'group_create': True, - 'password': 'test1234', - 'home_mode': '770', - 'home_create': True, - 'home': f'/mnt/{ds}' - }) - - -def test_060_immutable_user_validation(request): - # the `news` user is immutable - immutable_id = call('user.query', [['username', '=', 'news']], {'get': True})['id'] - to_validate = [ - {'group': 1}, - {'home': '/mnt/tank', 'home_create': True}, - {'uid': 777777}, - {'smb': True}, - {'username': 'no_way_bad'}, - ] - for i in to_validate: - with pytest.raises(ValidationErrors) as ve: - call('user.update', immutable_id, i) - assert ve.value.errors[0].errmsg == 'This attribute cannot be changed' - - -@contextlib.contextmanager -def toggle_smb_configured(): - ssh(f'rm {SMB_CONFIGURED_SENTINEL}') - assert call('smb.is_configured') is False - try: - yield - finally: - call('smb.set_configured') - - -def test_061_check_smb_configured_sentinel(): - assert call('smb.is_configured') - with toggle_smb_configured(): - # Check that ValidationError is properly raised - with pytest.raises(ValidationErrors): - with user_asset({ - 'username': 'doug', - 'full_name': 'doug', - 'group_create': True, - 'password': 'squirrel', - 'smb': True - }, get_instance=False): - pass - - with pytest.raises(ClientException): - call('smb.synchronize_passdb', job=True) - - assert call('smb.is_configured') - call('smb.synchronize_passdb', job=True) - - -def test_099_cleanup_share_user(): - # we have a test that asserts there are no smb accounts created - # by the time it runs so clean up this account - call('user.delete', UserAssets.ShareUser01['query_response']['id']) diff --git a/tests/api2/test_012_directory_service_ssh.py b/tests/api2/test_012_directory_service_ssh.py deleted file mode 100644 index d95c26cb45078..0000000000000 --- a/tests/api2/test_012_directory_service_ssh.py +++ /dev/null @@ -1,64 +0,0 @@ -import pytest -from functions import SSH_TEST - -from middlewared.test.integration.assets.directory_service import active_directory, ldap -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - 
-try:
-    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME
-except ImportError:
-    Reason = 'ADNameServer, AD_DOMAIN, ADPASSWORD, and/or ADUSERNAME are missing in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-try:
-    from config import (
-        LDAPUSER,
-        LDAPPASSWORD
-    )
-except ImportError:
-    Reason = 'LDAP* variables are not set up in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-
-@pytest.fixture(scope="function")
-def do_ad_connection(request):
-    with active_directory() as ad:
-        yield ad
-
-
-@pytest.fixture(scope="function")
-def do_ldap_connection(request):
-    with ldap() as ldap_conn:
-        yield ldap_conn
-
-
-def test_08_test_ssh_ad(do_ad_connection):
-    userobj = do_ad_connection['user_obj']
-    groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
-
-    payload = {"password_login_groups": [groupobj['gr_name']]}
-
-    try:
-        with expect_audit_method_calls([{
-            'method': 'ssh.update',
-            'params': [payload],
-            'description': 'Update SSH configuration'
-        }]):
-            call('ssh.update', payload)
-
-        results = SSH_TEST('ls -la', f'{ADUSERNAME}@{AD_DOMAIN}', ADPASSWORD)
-    finally:
-        call('ssh.update', {"password_login_groups": []})
-
-    assert results['result'] is True, results
-
-
-def test_09_test_ssh_ldap(do_ldap_connection):
-    userobj = call('user.get_user_obj', {'username': LDAPUSER})
-    groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
-    call('ssh.update', {"password_login_groups": [groupobj['gr_name']]})
-    cmd = 'ls -la'
-    results = SSH_TEST(cmd, LDAPUSER, LDAPPASSWORD)
-    call('ssh.update', {"password_login_groups": []})
-    assert results['result'] is True, results
diff --git a/tests/api2/test_014_failover_related.py b/tests/api2/test_014_failover_related.py
deleted file mode 100644
index 07ce15a958de3..0000000000000
--- a/tests/api2/test_014_failover_related.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import errno
-
-import pytest
-from pytest_dependency import depends
-
-from functions import SSH_TEST
-from auto_config import ha, user, password
-from truenas_api_client import ClientException
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, client
-
-
-@pytest.fixture(scope='module')
-def readonly_admin():
-    # READONLY role implies FAILOVER_READ
-    with unprivileged_user(
-        username='failover_guy',
-        group_name='failover_admins',
-        privilege_name='FAILOVER_PRIV',
-        allowlist=[],
-        web_shell=False,
-        roles=['READONLY_ADMIN']
-    ) as acct:
-        yield acct
-
-
-@pytest.mark.dependency(name='hactl_install_dir')
-def test_01_check_hactl_installed(request):
-    rv = SSH_TEST('which hactl', user, password)
-    assert rv['stdout'].strip() == '/usr/local/sbin/hactl', rv['output']
-
-
-@pytest.mark.dependency(name='hactl_status')
-def test_02_check_hactl_status(request):
-    depends(request, ['hactl_install_dir'])
-    rv = SSH_TEST('hactl', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        for i in ('Node status:', 'This node serial:', 'Other node serial:', 'Failover status:'):
-            assert i in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-@pytest.mark.dependency(name='hactl_takeover')
-def test_03_check_hactl_takeover(request):
-    # integration tests run against the master node (at least they should...)
-    depends(request, ['hactl_status'])
-    rv = SSH_TEST('hactl takeover', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'This command can only be run on the standby node.' in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-@pytest.mark.dependency(name='hactl_enable')
-def test_04_check_hactl_enable(request):
-    # integration tests run against the master node (at least they should...)
-    depends(request, ['hactl_takeover'])
-    rv = SSH_TEST('hactl enable', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'Failover already enabled.' in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-def test_05_check_hactl_disable(request):
-    depends(request, ['hactl_enable'])
-    rv = SSH_TEST('hactl disable', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'Failover disabled.' in output, output
-        assert call('failover.config')['disabled'] is True
-        rv = SSH_TEST('hactl enable', user, password)
-        output = rv['stdout'].strip()
-        assert 'Failover enabled.' in output, output
-        assert call('failover.config')['disabled'] is False
-    else:
-        assert 'Not an HA node' in output, output
-
-
-if ha:
-    def test_07_failover_replicate():
-        old_ns = call('network.configuration.config')['nameserver3']
-        new_ns = '1.1.1.1'
-        try:
-            call('network.configuration.update', {'nameserver3': new_ns})
-
-            remote = call('failover.call_remote', 'network.configuration.config')
-            assert remote['nameserver3'] == new_ns
-            assert remote['state']['nameserver3'] == new_ns
-        finally:
-            call('network.configuration.update', {'nameserver3': old_ns})
-            remote = call('failover.call_remote', 'network.configuration.config')
-            assert remote['nameserver3'] == old_ns
-            assert remote['state']['nameserver3'] == old_ns
-
-    def test_08_readonly_ops(request, readonly_admin):
-        with client(auth=(readonly_admin.username, readonly_admin.password)) as c:
-            c.call('failover.config')
-            c.call('failover.node')
-            c.call('failover.upgrade_pending')
-            with pytest.raises(ClientException) as ce:
-                c.call('failover.call_remote', 'user.update')
-
-            assert ce.value.errno == errno.EACCES
diff --git a/tests/api2/test_015_services.py b/tests/api2/test_015_services.py
deleted file mode 100644
index 0fab84e856923..0000000000000
--- a/tests/api2/test_015_services.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import time
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_001_oom_check():
-    pid = call('core.get_pid')
-    assert call('core.get_oom_score_adj', pid) == -1000
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)  # Sometimes systemd unit state is erroneously reported as active
-def test_non_silent_service_start_failure():
-    """
-    This tests for 2 conditions:
-    1. middleware raises a CallError that isn't empty
-    2. each time a CallError is raised, the message
-       has a timestamp and that timestamp changes
-       with each failure
-    """
-    with pytest.raises(CallError) as e:
-        call('service.start', 'ups', {'silent': False})
-
-    # Error looks like
-    """
-    middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:14 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
-    Jan 10 08:49:14 nut-monitor[3032658]: fopen /run/nut/upsmon.pid: No such file or directory
-    Jan 10 08:49:14 nut-monitor[3032658]: Unable to use old-style MONITOR line without a username
-    Jan 10 08:49:14 nut-monitor[3032658]: Convert it and add a username to upsd.users - see the documentation
-    Jan 10 08:49:14 nut-monitor[3032658]: Fatal error: unusable configuration
-    Jan 10 08:49:14 nut-monitor[3032658]: Network UPS Tools upsmon 2.7.4
-    Jan 10 08:49:14 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
-    Jan 10 08:49:14 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
-    Jan 10 08:49:14 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
-    """
-    lines1 = e.value.errmsg.splitlines()
-    first_ts, len_lines1 = ' '.join(lines1.pop(0).split()[:3]), len(lines1)
-    assert any('nut-monitor[' in line for line in lines1), lines1
-    assert any('systemd[' in line for line in lines1), lines1
-
-    # make sure we don't trigger the systemd StartLimitBurst threshold
-    # by removing this service from the failed unit list (if it's there)
-    ssh('systemctl reset-failed nut-monitor')
-
-    # we have to sleep 1 second here or the timestamp will be the
-    # same as when we first tried to start the service, which is
-    # what we're testing to make sure the message is up to date
-    # with reality
-    time.sleep(1)
-
-    with pytest.raises(CallError) as e:
-        call('service.start', 'ups', {'silent': False})
-
-    # Error looks like: (Notice the timestamp change, which is what we verify)
-    """
-    middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:15 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
-    Jan 10 08:49:15 nut-monitor[3032739]: fopen /run/nut/upsmon.pid: No such file or directory
-    Jan 10 08:49:15 nut-monitor[3032739]: Unable to use old-style MONITOR line without a username
-    Jan 10 08:49:15 nut-monitor[3032739]: Convert it and add a username to upsd.users - see the documentation
-    Jan 10 08:49:15 nut-monitor[3032739]: Fatal error: unusable configuration
-    Jan 10 08:49:15 nut-monitor[3032739]: Network UPS Tools upsmon 2.7.4
-    Jan 10 08:49:15 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
-    Jan 10 08:49:15 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
-    Jan 10 08:49:15 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
- """ - lines2 = e.value.errmsg.splitlines() - second_ts, len_lines2 = ' '.join(lines2.pop(0).split()[:3]), len(lines2) - assert any('nut-monitor[' in line for line in lines2), lines2 - assert any('systemd[' in line for line in lines2), lines2 - - # timestamp should change since we sleep(1) - assert first_ts != second_ts - - # the error messages will differ slightly (different PID for upsmon) but the number - # of lines should be the same - assert len_lines1 == len_lines2 - - # Stop the service to avoid syslog spam - call('service.stop', 'ups') diff --git a/tests/api2/test_020_account.py b/tests/api2/test_020_account.py deleted file mode 100644 index da69494c1c294..0000000000000 --- a/tests/api2/test_020_account.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD -# Location for tests into REST API of FreeNAS - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST - - -def delete_group_delete_users(delete_users): - results = POST("/user/", { - "username": "test", - "group_create": True, - "full_name": "Test", - "smb": False, - "password_disabled": True, - }) - assert results.status_code == 200, results.text - user_id = results.json() - - results = GET(f"/user/id/{user_id}") - assert results.status_code == 200, results.text - group_id = results.json()["group"]["id"] - - results = DELETE(f"/group/id/{group_id}", {"delete_users": delete_users}) - assert results.status_code == 200, results.text - - return user_id, group_id - - -def test_01_delete_group_delete_users(): - user_id, group_id = delete_group_delete_users(True) - - results = GET(f"/user/id/{user_id}") - assert results.status_code == 404, results.text - - -def test_01_delete_group_no_delete_users(): - user_id, group_id = delete_group_delete_users(False) - - results = GET(f"/user/id/{user_id}") - assert results.status_code == 200, results.text - assert results.json()["group"]["bsdgrp_group"] in ["nogroup", "nobody"] - - results = DELETE(f"/user/id/{user_id}") - assert results.status_code == 200, results.text diff --git a/tests/api2/test_030_activedirectory.py b/tests/api2/test_030_activedirectory.py deleted file mode 100644 index 3f35c9d1ea9a4..0000000000000 --- a/tests/api2/test_030_activedirectory.py +++ /dev/null @@ -1,416 +0,0 @@ -import ipaddress -import os -from time import sleep - -import dns.resolver -import pytest -from truenas_api_client import \ - ValidationErrors as ClientValidationErrors -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.directory_service import ( - active_directory, override_nameservers) -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.privilege import privilege -from middlewared.test.integration.assets.product import product_type -from middlewared.test.integration.utils import call, client, ssh -from middlewared.test.integration.utils.client import truenas_server -from middlewared.test.integration.utils.system import reset_systemd_svcs - -from auto_config import ha -from protocols import smb_connection, smb_share -from truenas_api_client import ClientException - -if ha and "hostname_virtual" in os.environ: - hostname = os.environ["hostname_virtual"] -else: - from auto_config import hostname - -try: - from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME - AD_USER = fr"AD02\{ADUSERNAME.lower()}" -except ImportError: - Reason = 'ADNameServer AD_DOMAIN, ADPASSWORD, or/and ADUSERNAME are missing 
in config.py"' - pytestmark = pytest.mark.skip(reason=Reason) - - -SMB_NAME = "TestADShare" - - -def remove_dns_entries(payload): - call('dns.nsupdate', {'ops': payload}) - - -def cleanup_forward_zone(): - try: - result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']}) - except dns.resolver.NXDOMAIN: - # No entry, nothing to do - return - - ips_to_remove = [rdata['address'] for rdata in result] - - payload = [] - for i in ips_to_remove: - addr = ipaddress.ip_address(i) - payload.append({ - 'command': 'DELETE', - 'name': f'{hostname}.{AD_DOMAIN}.', - 'address': str(addr), - 'type': 'A' if addr.version == 4 else 'AAAA' - }) - - remove_dns_entries(payload) - - -def check_ad_started(): - ds = call('directoryservices.status') - if ds['type'] is None: - return False - - assert ds['type'] == 'ACTIVEDIRECTORY' - assert ds['status'] == 'HEALTHY' - return True - - -def cleanup_reverse_zone(): - result = call('activedirectory.ipaddresses_to_register', {'hostname': f'{hostname}.{AD_DOMAIN}.', 'bindip': []}, False) - ptr_table = {f'{ipaddress.ip_address(i).reverse_pointer}.': i for i in result} - - try: - result = call('dnsclient.reverse_lookup', {'addresses': list(ptr_table.values())}) - except dns.resolver.NXDOMAIN: - # No entry, nothing to do - return - - payload = [] - for host in result: - reverse_pointer = host["name"] - assert reverse_pointer in ptr_table, str(ptr_table) - addr = ipaddress.ip_address(ptr_table[reverse_pointer]) - payload.append({ - 'command': 'DELETE', - 'name': host['target'], - 'address': str(addr), - 'type': 'A' if addr.version == 4 else 'AAAA' - }) - - remove_dns_entries(payload) - - -@pytest.fixture(scope="function") -def set_product_type(request): - if ha: - # HA product is already enterprise-licensed - yield - else: - with product_type(): - yield - - -@pytest.fixture(scope="function") -def set_ad_nameserver(request): - with override_nameservers() as ns: - yield (request, ns) - - -def test_cleanup_nameserver(set_ad_nameserver): - domain_info = call('activedirectory.domain_info', AD_DOMAIN) - - cred = call('kerberos.get_cred', {'dstype': 'ACTIVEDIRECTORY', - 'conf': {'bindname': ADUSERNAME, - 'bindpw': ADPASSWORD, - 'domainname': AD_DOMAIN - } - }) - - call('kerberos.do_kinit', {'krb5_cred': cred, - 'kinit-options': {'kdc_override': {'domain': AD_DOMAIN.upper(), - 'kdc': domain_info['KDC server'] - }, - } - }) - - # Now that we have proper kinit as domain admin - # we can nuke stale DNS entries from orbit. - # - cleanup_forward_zone() - cleanup_reverse_zone() - - -def test_enable_leave_activedirectory(): - reset_systemd_svcs('winbind') - assert check_ad_started() is False - - if not ha: - with pytest.raises(ValidationErrors): - # At this point we are not enterprise licensed - call("system.general.update", {"ds_auth": True}) - - short_name = None - - with active_directory(dns_timeout=15) as ad: - short_name = ad['dc_info']['Pre-Win2k Domain'] - - # Make sure we can read our secrets.tdb file - secrets_has_domain = call('directoryservices.secrets.has_domain', short_name) - assert secrets_has_domain is True - - # Check that our database has backup of this info written to it. 
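- # (Aside: the live copy of these secrets can also be inspected by hand with
- # Samba's tdbdump utility, e.g.
- #     tdbdump /var/db/system/samba4/private/secrets.tdb | grep MACHINE_PASSWORD
- # This is an illustrative invocation only, but the path matches the one that
- # test_secrets_restore deletes further down in this file.)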
- db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$'] - assert f'SECRETS/MACHINE_PASSWORD/{short_name}' in db_secrets - - # Last password change should be populated - passwd_change = call('directoryservices.get_last_password_change') - assert passwd_change['dbconfig'] is not None - assert passwd_change['secrets'] is not None - - # We should be able to change some parameters when joined to AD - call('activedirectory.update', {'domainname': AD_DOMAIN, 'verbose_logging': True}, job=True) - - # Changing kerberos realm should raise ValidationError - with pytest.raises(ClientValidationErrors) as ve: - call('activedirectory.update', {'domainname': AD_DOMAIN, 'kerberos_realm': None}, job=True) - - assert ve.value.errors[0].errmsg.startswith('Kerberos realm may not be altered') - - # This should be caught by our catchall - with pytest.raises(ClientValidationErrors) as ve: - call('activedirectory.update', {'domainname': AD_DOMAIN, 'createcomputer': ''}, job=True) - - assert ve.value.errors[0].errmsg.startswith('Parameter may not be changed') - - assert check_ad_started() is True - - # Verify that idmapping is working - pw = ad['user_obj'] - - # Verify winbindd information - assert pw['sid'] is not None, str(ad) - assert not pw['sid'].startswith('S-1-22-1-'), str(ad) - assert pw['local'] is False - assert pw['source'] == 'ACTIVEDIRECTORY' - - result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']}) - assert len(result) != 0 - - addresses = [x['address'] for x in result] - assert truenas_server.ip in addresses - - res = call('privilege.query', [['name', 'C=', AD_DOMAIN]], {'get': True}) - assert res['ds_groups'][0]['name'].endswith('domain admins') - assert res['ds_groups'][0]['sid'].endswith('512') - assert res['allowlist'][0] == {'method': '*', 'resource': '*'} - - assert check_ad_started() is False - - secrets_has_domain = call('directoryservices.secrets.has_domain', short_name) - assert secrets_has_domain is False - - with pytest.raises(KeyError): - call('user.get_user_obj', {'username': AD_USER}) - - result = call('privilege.query', [['name', 'C=', AD_DOMAIN]]) - assert len(result) == 0, str(result) - - -def test_activedirectory_smb_ops(): - reset_systemd_svcs('winbind') - with active_directory(dns_timeout=15) as ad: - short_name = ad['dc_info']['Pre-Win2k Domain'] - machine_password_key = f'SECRETS/MACHINE_PASSWORD/{short_name}' - running_pwd = call('directoryservices.secrets.dump')[machine_password_key] - db_pwd = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$'][machine_password_key] - - # We've joined and left AD already. Verify secrets are still getting backed up correctly.
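- # Both copies are base64-encoded, NUL-padded blobs, so comparing them as
- # opaque strings is enough here; if they ever had to be compared by decoded
- # value, a sketch would be
- #     b64decode(running_pwd).rstrip(b'\x00') == b64decode(db_pwd).rstrip(b'\x00')
- # mirroring the decode used by the idmap secret checks elsewhere in this patch.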
- assert running_pwd == db_pwd - - with dataset( - "ad_smb", - {'share_type': 'SMB'}, - acl=[{ - 'tag': 'GROUP', - 'id': ad['user_obj']['pw_uid'], - 'perms': {'BASIC': 'FULL_CONTROL'}, - 'flags': {'BASIC': 'INHERIT'}, - 'type': 'ALLOW' - }] - ) as ds: - call('service.restart', 'cifs') - - with smb_share(f'/mnt/{ds}', {'name': SMB_NAME}): - with smb_connection( - host=truenas_server.ip, - share=SMB_NAME, - username=ADUSERNAME, - domain='AD02', - password=ADPASSWORD - ) as c: - fd = c.create_file('testfile.txt', 'w') - c.write(fd, b'foo') - val = c.read(fd, 0, 3) - c.close(fd, True) - assert val == b'foo' - - c.mkdir('testdir') - fd = c.create_file('testdir/testfile2.txt', 'w') - c.write(fd, b'foo2') - val = c.read(fd, 0, 4) - c.close(fd, True) - assert val == b'foo2' - - c.rmdir('testdir') - - with dataset( - "ad_datasets", - {'share_type': 'SMB'}, - acl=[{ - 'tag': 'GROUP', - 'id': ad['user_obj']['pw_uid'], - 'perms': {'BASIC': 'FULL_CONTROL'}, - 'flags': {'BASIC': 'INHERIT'}, - 'type': 'ALLOW' - }] - ) as ds: - with smb_share(f'/mnt/{ds}', { - 'name': 'DATASETS', - 'purpose': 'NO_PRESET', - 'auxsmbconf': 'zfs_core:zfs_auto_create = true', - 'path_suffix': '%D/%U' - }): - with smb_connection( - host=truenas_server.ip, - share='DATASETS', - username=ADUSERNAME, - domain='AD02', - password=ADPASSWORD - ) as c: - fd = c.create_file('nested_test_file', "w") - c.write(fd, b'EXTERNAL_TEST') - c.close(fd) - - acl = call('filesystem.getacl', os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME), True) - assert acl['trivial'] is False, str(acl) - - with dataset( - "ad_home", - {'share_type': 'SMB'}, - acl=[{ - 'tag': 'GROUP', - 'id': ad['user_obj']['pw_uid'], - 'perms': {'BASIC': 'FULL_CONTROL'}, - 'flags': {'BASIC': 'INHERIT'}, - 'type': 'ALLOW' - }] - ) as ds: - - with smb_share(f'/mnt/{ds}', { - 'name': 'TEST_HOME', - 'purpose': 'NO_PRESET', - 'home': True, - }): - # must refresh idmap cache to get new homedir from NSS - # this means we may need a few seconds for winbindd - # service to settle down on slow systems (like our CI VMs) - sleep(10 if ha else 5) - - with smb_connection( - host=truenas_server.ip, - share='HOMES', - username=ADUSERNAME, - domain='AD02', - password=ADPASSWORD - ) as c: - fd = c.create_file('homes_test_file', "w") - c.write(fd, b'EXTERNAL_TEST') - c.close(fd) - - file_local_path = os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME, 'homes_test_file') - acl = call('filesystem.getacl', file_local_path, True) - assert acl['trivial'] is False, str(acl) - - -def test_account_privilege_authentication(set_product_type): - reset_systemd_svcs('winbind smbd') - - with active_directory(dns_timeout=15): - call("system.general.update", {"ds_auth": True}) - nusers = call("user.query", [["local", "=", False]], {"count": True}) - assert nusers > 0 - ngroups = call("group.query", [["local", "=", False]], {"count": True}) - assert ngroups > 0 - try: - # RID 513 is constant for "Domain Users" - domain_sid = call("idmap.domain_info", AD_DOMAIN.split(".")[0])['sid'] - with privilege({ - "name": "AD privilege", - "local_groups": [], - "ds_groups": [f"{domain_sid}-513"], - "allowlist": [ - {"method": "CALL", "resource": "system.info"}, - {"method": "CALL", "resource": "user.query"}, - {"method": "CALL", "resource": "group.query"}, - ], - "web_shell": False, - }): - with client(auth=(f"limiteduser@{AD_DOMAIN}", ADPASSWORD)) as c: - methods = c.call("core.get_methods") - me = c.call("auth.me") - - assert 'DIRECTORY_SERVICE' in me['account_attributes'] - assert 'ACTIVE_DIRECTORY' in me['account_attributes'] - 
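- # (The two account_attributes flags asserted above mark this session as backed
- # by a directory service, and specifically by Active Directory, rather than by
- # a local account; description inferred from the assertions themselves.)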
- assert len(c.call("user.query", [["local", "=", False]])) == nusers - assert len(c.call("group.query", [["local", "=", False]])) == ngroups - - assert "system.info" in methods - assert "pool.create" not in methods - - # ADUSERNAME is member of domain admins and will have - # all privileges - with client(auth=(f"{ADUSERNAME}@{AD_DOMAIN}", ADPASSWORD)) as c: - methods = c.call("core.get_methods") - - assert "pool.create" in methods - - # Alternative formatting for user name \. - # this should also work for auth - with client(auth=(AD_USER, ADPASSWORD)) as c: - methods = c.call("core.get_methods") - - assert "pool.create" in methods - - finally: - call("system.general.update", {"ds_auth": False}) - - -def test_secrets_restore(): - - with active_directory(): - reset_systemd_svcs('winbind smbd') - assert check_ad_started() is True - - ssh('rm /var/db/system/samba4/private/secrets.tdb') - - with pytest.raises(ClientException): - call('directoryservices.health.check') - - call('directoryservices.health.recover') - - assert check_ad_started() is True - - -def test_keytab_restore(): - - with active_directory(): - reset_systemd_svcs('winbind smbd') - assert check_ad_started() is True - - kt_id = call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})['id'] - - # delete our keytab from datastore - call('datastore.delete', 'directoryservice.kerberoskeytab', kt_id) - - call('directoryservices.health.recover') - - # verify that it was recreated during health check - call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True}) diff --git a/tests/api2/test_032_ad_kerberos.py b/tests/api2/test_032_ad_kerberos.py deleted file mode 100644 index 9b1e889fd52ab..0000000000000 --- a/tests/api2/test_032_ad_kerberos.py +++ /dev/null @@ -1,355 +0,0 @@ -import os -import sys - -import pytest - -from middlewared.test.integration.assets.pool import dataset - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import SSH_TEST -from auto_config import hostname, password, user -from contextlib import contextmanager -from base64 import b64decode -from protocols import nfs_share -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.utils import call -from middlewared.test.integration.assets.directory_service import active_directory - -try: - from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, AD_COMPUTER_OU -except ImportError: - pytestmark = pytest.mark.skip(reason='Missing AD configuration') - -SAMPLE_KEYTAB = 
"BQIAAABTAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABHAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABTAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAABHAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAABbAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAE8AAgALSE9NRURPTS5GVU4AEXJlc3RyaWN0ZWRrcmJob3N0AAZURVNUNDkAAAABXyQSugEAEQAQENA4f60pgK6h51DndZYoWAAAAAEAAAAAAAAAawACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAAXwACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QABlRFU1Q0OQAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAAWwACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQAXABAcyjciCUnM9DmiyiPO4VIaAAAAAQAAAAAAAABPAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBABcAEBzKNyIJScz0OaLKI87hUhoAAAABAAAAAAAAAEYAAgALSE9NRURPTS5GVU4ABGhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQABAAgxzdyr/ViiywAAAAEAAAAAAAAAOgACAAtIT01FRE9NLkZVTgAEaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABGAAIAC0hPTUVET00uRlVOAARob3N0ABJ0ZXN0NDkuaG9tZWRvbS5mdW4AAAABXyQSugEAAwAIMc3cq/1YossAAAABAAAAAAAAADoAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQADAAgxzdyr/ViiywAAAAEAAAAAAAAATgACAAtIT01FRE9NLkZVTgAEaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAEIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQARABAQ0Dh/rSmArqHnUOd1lihYAAAAAQAAAAAAAABeAAIAC0hPTUVET00uRlVOAARob3N0ABJ0ZXN0NDkuaG9tZWRvbS5mdW4AAAABXyQSugEAEgAgimU40564099I3agBtkEW/3Bck3vZM3y8IQQJuUJ7jIkAAAABAAAAAAAAAFIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAATgACAAtIT01FRE9NLkZVTgAEaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABcAEBzKNyIJScz0OaLKI87hUhoAAAABAAAAAAAAAEIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQAXABAcyjciCUnM9DmiyiPO4VIaAAAAAQAAAAAAAAA1AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAAA1AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAAA9AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAE0AAQALSE9NRURPTS5GVU4AB1RFU1Q0OSQAAAABXyQSugEAEgAgimU40564099I3agBtkEW/3Bck3vZM3y8IQQJuUJ7jIkAAAABAAAAAAAAAD0AAQALSE9NRURPTS5GVU4AB1RFU1Q0OSQAAAABXyQSugEAFwAQHMo3IglJzPQ5osojzuFSGgAAAAEAAAAA" # noqa - -SAMPLEDOM_NAME = "CANARY.FUN" -SAMPLEDOM_REALM = { - "realm": SAMPLEDOM_NAME, - "kdc": ["169.254.100.1", "169.254.100.2", "169.254.100.3"], - "admin_server": ["169.254.100.10", "169.254.100.11", "169.254.100.12"], - "kpasswd_server": ["169.254.100.20", "169.254.100.21", "169.254.100.22"], -} - - -APPDEFAULTS_PAM_OVERRIDE = """ -pam = { - forwardable = false - ticket_lifetime = 36000 -} -""" - - -def get_export_sec(exports_config): - sec_entry = None - for entry in exports_config.splitlines(): - if not entry.startswith("\t"): - continue - - line = entry.strip().split("(")[1] - sec_entry = line.split(",")[0] - break - - return sec_entry - - -def regenerate_exports(): - # NFS service isn't running for these tests - # and so exports aren't updated. Force the update. 
- call('etc.generate', 'nfsd') - - -def check_export_sec(expected): - regenerate_exports() - results = SSH_TEST('cat /etc/exports', user, password) - assert results['result'] is True, results['stderr'] - exports_config = results['stdout'].strip() - sec = get_export_sec(exports_config) - assert sec == expected, exports_config - - -def parse_krb5_conf(fn, split=None, state=None): - results = SSH_TEST('cat /etc/krb5.conf', user, password) - assert results['result'] is True, results['output'] - - if split: - krb5conf_lines = results['stdout'].split(split) - else: - krb5conf_lines = results['stdout'].splitlines() - - for idx, entry in enumerate(krb5conf_lines): - fn(krb5conf_lines, idx, entry, state) - - return results['output'] - - -@contextmanager -def add_kerberos_keytab(ktname): - kt = call('kerberos.keytab.create', { - "name": ktname, - "file": SAMPLE_KEYTAB - }) - try: - yield kt - finally: - call('kerberos.keytab.delete', kt['id']) - - -@contextmanager -def add_kerberos_realm(realm_name): - realm = call('kerberos.realm.create', { - 'realm': realm_name, - }) - try: - yield realm - finally: - call('kerberos.realm.delete', realm['id']) - - -@pytest.fixture(scope="function") -def do_ad_connection(request): - with active_directory( - AD_DOMAIN, - ADUSERNAME, - ADPASSWORD, - netbiosname=hostname, - createcomputer=AD_COMPUTER_OU, - ) as ad: - yield (request, ad) - - -def test_kerberos_keytab_and_realm(do_ad_connection): - - def krb5conf_parser(krb5conf_lines, idx, entry, state): - if entry.lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][0]}": - assert krb5conf_lines[idx + 1].lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][1]}" - assert krb5conf_lines[idx + 2].lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][2]}" - state['has_kdc'] = True - - if entry.lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][0]}": - assert krb5conf_lines[idx + 1].lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][1]}" - assert krb5conf_lines[idx + 2].lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][2]}" - state['has_admin_server'] = True - - if entry.lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][0]}": - assert krb5conf_lines[idx + 1].lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][1]}" - assert krb5conf_lines[idx + 2].lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][2]}" - state['has_kpasswd_server'] = True - - assert call('directoryservices.status')['status'] == 'HEALTHY' - """ - The keytab in this case is a b64encoded keytab file. - AD_MACHINE_ACCOUNT is automatically generated during domain - join and uploaded into our configuration database. This - test checks for its presence and that it's validly b64 encoded. - The process of decoding and adding to system keytab is tested - in later kerberos tests. "kerberos.start" will decode, write - to system keytab, and kinit. So in this case, proper function - can be determined by printing contents of system keytab and - verifying that we were able to get a kerberos ticket. - """ - kt = call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True}) - b64decode(kt['file']) - - """ - kerberos_principal_choices lists unique keytab principals in - the system keytab. AD_MACHINE_ACCOUNT should add more than - one principal. - """ - orig_kt = call('kerberos.keytab.kerberos_principal_choices') - assert orig_kt != [] - - """ - kerberos.check_ticket performs a platform-independent verification - of the kerberos ticket.
- """ - call('kerberos.check_ticket') - - """ - Test uploading b64encoded sample kerberos keytab included - at top of this file. In the next series of tests we will - upload, validate that it was uploaded, and verify that the - keytab is read back correctly. - """ - with add_kerberos_keytab('KT2'): - kt2 = call('kerberos.keytab.query', [['name', '=', 'KT2']], {'get': True}) - b64decode(kt2['file']) - assert kt2['file'] == SAMPLE_KEYTAB - - """ - AD Join should automatically add a kerberos realm - for the AD domain. - """ - call('kerberos.realm.query', [['realm', '=', AD_DOMAIN.upper()]], {'get': True}) - - with add_kerberos_realm(SAMPLEDOM_NAME) as new_realm: - payload = SAMPLEDOM_REALM.copy() - payload.pop("realm") - call('kerberos.realm.update', new_realm['id'], payload) - - r = call('kerberos.realm.query', [['realm', '=', SAMPLEDOM_NAME]], {'get': True}) - r.pop('id') - assert r == SAMPLEDOM_REALM - - # Verify realms properly added to krb5.conf - iter_state = { - 'has_kdc': False, - 'has_admin_server': False, - 'has_kpasswd_server': False - } - output = parse_krb5_conf(krb5conf_parser, state=iter_state) - - assert iter_state['has_kdc'] is True, output - assert iter_state['has_admin_server'] is True, output - assert iter_state['has_kpasswd_server'] is True, output - - assert len(call('kerberos.realm.query', [['realm', '=', SAMPLEDOM_NAME]])) == 0 - - -def test_kerberos_krbconf(do_ad_connection): - def parser_1(unused, idx, sec, state): - if not sec.startswith("appdefaults"): - return - - for entry in sec.splitlines(): - if entry.lstrip().startswith('}'): - break - - if entry.strip() == "forwardable = false": - state['has_forwardable'] = True - - if entry.strip() == "ticket_lifetime = 36000": - state['has_ticket_lifetime'] = True - - def parse_section(unused, idx, sec, state): - if not sec.startswith(state['section']): - return - - for entry in sec.splitlines(): - if entry.strip() == state['to_check']: - state['found'] = True - break - - """ - Test of more complex auxiliary parameter parsing that allows - users to override our defaults. 
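- For instance, the APPDEFAULTS_PAM_OVERRIDE payload defined at the top of
- this file should come back out of /etc/krb5.conf as an appdefaults section
- shaped roughly like (illustrative rendering, not a captured file):
-     [appdefaults]
-         pam = {
-             forwardable = false
-             ticket_lifetime = 36000
-         }
- which is the shape parser_1 above walks.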
- """ - - call('kerberos.update', {'appdefaults_aux': APPDEFAULTS_PAM_OVERRIDE}) - - iter_state = { - 'has_forwardable': False, - 'has_ticket_lifetime': False - } - - output = parse_krb5_conf(parser_1, split='[', state=iter_state) - - assert iter_state['has_forwardable'] is True, output - assert iter_state['has_ticket_lifetime'] is True, output - - call('kerberos.update', {'appdefaults_aux': 'encrypt = true'}) - - iter_state = { - 'section': 'appdefaults', - 'found': False, - 'to_check': 'encrypt = true' - } - - output = parse_krb5_conf(parse_section, split='[', state=iter_state) - assert iter_state['found'] is True, output - - call('kerberos.update', {'libdefaults_aux': 'rdns = true'}) - - iter_state = { - 'section': 'libdefaults', - 'found': False, - 'to_check': 'rdns = true' - } - output = parse_krb5_conf(parse_section, split='[', state=iter_state) - assert iter_state['found'] is True, output - - -def test_invalid_aux(): - call('kerberos.update', {'appdefaults_aux': '', 'libdefaults_aux': ''}) - - # check that parser raises validation errors - with pytest.raises(ValidationErrors): - call('kerberos.update', {'appdefaults_aux': 'canary = true'}) - - with pytest.raises(ValidationErrors): - call('kerberos.update', {'libdefaults_aux': 'canary = true'}) - - -def test_kerberos_nfs4(do_ad_connection): - assert call('kerberos.keytab.has_nfs_principal') is True - - with dataset('AD_NFS') as ds: - with nfs_share(f'/mnt/{ds}', options={'comment': 'KRB Test Share'}): - call('nfs.update', {"protocols": ["NFSV3", "NFSV4"]}) - - """ - First NFS exports check. In this situation we are joined to - AD and therefore have a keytab with NFS entry - - Expected security is: - "V4: / -sec=sys:krb5:krb5i:krb5p" - """ - check_export_sec('sec=sys:krb5:krb5i:krb5p') - - call('nfs.update', {"v4_krb": True}) - - """ - Second NFS exports check. We now have an NFS SPN entry - Expected security is: - "V4: / -sec=krb5:krb5i:krb5p" - """ - check_export_sec('sec=krb5:krb5i:krb5p') - - """ - v4_krb_enabled should still be True after this - disabling v4_krb because we still have an nfs - service principal in our keytab. - """ - data = call('nfs.update', {'v4_krb': False}) - assert data['v4_krb_enabled'] is True, str(data) - - """ - Third NFS exports check. We now have an NFS SPN entry - but v4_krb is disabled. - Expected security is: - "V4: / -sec=sys:krb5:krb5i:krb5p" - """ - check_export_sec('sec=sys:krb5:krb5i:krb5p') - - -def test_verify_nfs_krb_disabled(): - """ - This test checks that we no longer are flagged as having - v4_krb_enabled now that we are not joined to AD. 
- """ - assert call('nfs.config')['v4_krb_enabled'] is False - - -def test_kerberos_ticket_management(do_ad_connection): - klist_out = call('kerberos.klist') - assert klist_out['default_principal'].startswith(hostname.upper()), str(klist_out) - assert klist_out['ticket_cache']['type'] == 'KEYRING' - assert klist_out['ticket_cache']['name'].startswith('persistent:0') - assert len(klist_out['tickets']) != 0 - - to_check = None - for tkt in klist_out['tickets']: - if tkt['server'].startswith('krbtgt'): - to_check = tkt - - assert to_check is not None, str(klist_out) - assert 'RENEWABLE' in to_check['flags'] - - call('core.get_jobs', [ - ['method', '=', 'kerberos.wait_for_renewal'], - ['state', '=', 'RUNNING'] - ], {'get': True}) - - -def test_check_ad_machine_account_deleted_after_ad_leave(): - assert len(call('kerberos.keytab.query')) == 0 diff --git a/tests/api2/test_035_ad_idmap.py b/tests/api2/test_035_ad_idmap.py deleted file mode 100644 index cc571861cd218..0000000000000 --- a/tests/api2/test_035_ad_idmap.py +++ /dev/null @@ -1,375 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD -# Location for tests into REST API of FreeNAS - -import pytest -import sys -import os -import json -apifolder = os.getcwd() -sys.path.append(apifolder) -from auto_config import hostname -from base64 import b64decode -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.directory_service import active_directory -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.system import reset_systemd_svcs -from time import sleep - -try: - from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, AD_COMPUTER_OU - from config import ( - LDAPBASEDN, - LDAPBINDDN, - LDAPBINDPASSWORD, - LDAPHOSTNAME - ) -except ImportError: - Reason = 'ADNameServer AD_DOMAIN, ADPASSWORD, or/and ADUSERNAME are missing in config.py"' - pytestmark = pytest.mark.skip(reason=Reason) - -BACKENDS = [ - "AD", - "AUTORID", - "LDAP", - "NSS", - "RFC2307", - "TDB", - "RID", -] - - -@pytest.fixture(scope="function") -def idmap_domain(): - low, high = call('idmap.get_next_idmap_range') - payload = { - "name": "canary", - "range_low": low, - "range_high": high, - "idmap_backend": "RID", - "options": {}, - } - new_idmap = call('idmap.create', payload) - - try: - yield new_idmap - finally: - call('idmap.delete', new_idmap['id']) - - -@pytest.fixture(scope="module") -def do_ad_connection(request): - call('service.update', 'cifs', {'enable': True}) - try: - with active_directory( - AD_DOMAIN, - ADUSERNAME, - ADPASSWORD, - netbiosname=hostname, - createcomputer=AD_COMPUTER_OU, - ) as ad: - yield ad - finally: - call('service.update', 'cifs', {'enable': False}) - - -def assert_ad_healthy(): - assert call('directoryservices.status')['type'] == 'ACTIVEDIRECTORY' - call('directoryservices.health.check') - - -@pytest.fixture(scope="module") -def backend_data(): - backend_options = call('idmap.backend_options') - workgroup = call('smb.config')['workgroup'] - yield {'options': backend_options, 'workgroup': workgroup} - - -def test_name_sid_resolution(do_ad_connection): - - # get list of AD group gids for user from NSS - ad_acct = call('user.get_user_obj', {'username': f'{ADUSERNAME}@{AD_DOMAIN}', 'get_groups': True}) - groups = set(ad_acct['grouplist']) - - # convert list of gids into sids - sids = call('idmap.convert_unixids', [{'id_type': 'GROUP', 'id': x} for x in groups]) - sidlist = set([x['sid'] for x in sids['mapped'].values()]) - assert 
len(groups) == len(sidlist) - - # convert sids back into unixids - unixids = call('idmap.convert_sids', list(sidlist)) - assert set([x['id'] for x in unixids['mapped'].values()]) == groups - - -@pytest.mark.parametrize('backend', BACKENDS) -def test_backend_options(do_ad_connection, backend_data, backend): - """ - Tests for backend options are performed against - the backend for the domain we're joined to - (DS_TYPE_ACTIVEDIRECTORY) so that auto-detection - works correctly. The three default idmap backends - DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, - DS_TYPE_DEFAULT_DOMAIN have hard-coded ids and - so we don't need to look them up. - """ - reset_systemd_svcs('winbind smbd') - opts = backend_data['options'][backend]['parameters'].copy() - WORKGROUP = backend_data['workgroup'] - set_secret = False - - payload = { - "name": "DS_TYPE_ACTIVEDIRECTORY", - "range_low": "1000000001", - "range_high": "2000000000", - "idmap_backend": backend, - "options": {} - } - payload3 = {"options": {}} - for k, v in opts.items(): - """ - Populate garbage data where an opt is required. - This should get us past the first step of - switching to the backend before doing more - comprehensive tests. - """ - if v['required']: - payload["options"].update({k: "canary"}) - - if backend == 'RFC2307': - payload['options'].update({"ldap_server": "STANDALONE"}) - - if not payload['options']: - payload.pop('options') - - call('idmap.update', 1, payload) - - # We unfortunately need to sleep here on each iteration to allow time for - # winbind to settle down before applying more idmap changes otherwise - # the subsequent idmap.update call will time out. - sleep(5) - - if backend == "AUTORID": - IDMAP_CFG = "idmap config * " - else: - IDMAP_CFG = f"idmap config {WORKGROUP} " - - """ - Validate that backend was correctly set in smb.conf. - """ - running_backend = call('smb.getparm', f'{IDMAP_CFG}: backend', 'GLOBAL') - assert running_backend == backend.lower() - - if backend == "RID": - """ - sssd_compat generates a lower range based - on murmur3 hash of domain SID. Since we're validating - basic functionality, checking that our range_low - changed is sufficient for now. - """ - payload2 = {"options": {"sssd_compat": True}} - out = call('idmap.update', 1, payload2) - assert out['range_low'] != payload['range_low'] - - elif backend == "AUTORID": - """ - autorid is unique among the idmap backends because - its configuration replaces the default idmap backend - "idmap config *".
- """ - payload3["options"] = { - "rangesize": 200000, - "readonly": True, - "ignore_builtin": True, - } - call('idmap.update', 1, payload3) - - elif backend == "AD": - payload3["options"] = { - "schema_mode": "SFU", - "unix_primary_group": True, - "unix_nss_info": True, - } - call('idmap.update', 1, payload3) - - elif backend == "LDAP": - payload3["options"] = { - "ldap_base_dn": LDAPBASEDN, - "ldap_user_dn": LDAPBINDDN, - "ldap_url": LDAPHOSTNAME, - "ldap_user_dn_password": LDAPBINDPASSWORD, - "ssl": "ON", - "readonly": True, - } - call('idmap.update', 1, payload3) - secret = payload3["options"].pop("ldap_user_dn_password") - set_secret = True - - elif backend == "RFC2307": - payload3["options"] = { - "ldap_server": "STANDALONE", - "bind_path_user": LDAPBASEDN, - "bind_path_group": LDAPBASEDN, - "user_cn": True, - "ldap_domain": "", - "ldap_url": LDAPHOSTNAME, - "ldap_user_dn": LDAPBINDDN, - "ldap_user_dn_password": LDAPBINDPASSWORD, - "ssl": "ON", - "ldap_realm": True, - } - call('idmap.update', 1, payload3) - r = payload3["options"].pop("ldap_realm") - payload3["options"]["realm"] = r - secret = payload3["options"].pop("ldap_user_dn_password") - set_secret = True - - for k, v in payload3['options'].items(): - """ - At this point we should have added every supported option - for the current backend. Iterate through each option and verify - that it was written to samba's running configuration. - """ - if k in ['realm', 'ssl']: - continue - - res = call('smb.getparm', f'{IDMAP_CFG}: {k}', 'GLOBAL') - assert res is not None, f'Failed to retrieve `{IDMAP_CFG}: {k}` from running configuration' - - if k == 'ldap_url': - v = f'ldaps://{v}' - elif k == 'ldap_domain': - v = None - - if v == 'STANDALONE': - v = 'stand-alone' - - try: - res = json.loads(res) - assert res == v, f"{backend} - [{k}]: {res}" - except json.decoder.JSONDecodeError: - if isinstance(v, bool): - v = str(v) - - if v is None: - assert res in (None, ''), f"{backend} - [{k}]: {res}" - else: - assert v.casefold() == res.casefold(), f"{backend} - [{k}]: {res}" - - if set_secret: - """ - API calls that set an idmap secret should result in the - secret being written to secrets.tdb in Samba's private - directory. To check this, force a secrets db dump, check - for keys, then decode secret. 
- """ - idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN) - db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$'] - - # Check that our secret is written and stored in secrets backup correctly - assert idmap_secret == db_secrets[f"SECRETS/GENERIC/IDMAP_LDAP_{WORKGROUP}/{LDAPBINDDN}"] - decoded_sec = b64decode(idmap_secret).rstrip(b'\x00').decode() - assert secret == decoded_sec, idmap_secret - - # Use net command via samba to rewrite secret and make sure it is same - ssh(f"net idmap set secret {WORKGROUP} '{secret}'") - new_idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN) - assert idmap_secret == new_idmap_secret - - secrets_dump = call('directoryservices.secrets.dump') - assert secrets_dump == db_secrets - - # reset idmap backend to RID to ensure that winbindd is running - reset_systemd_svcs('winbind smbd') - - payload = { - "name": "DS_TYPE_ACTIVEDIRECTORY", - "range_low": "1000000001", - "range_high": "2000000000", - "idmap_backend": 'RID', - "options": {} - } - call('idmap.update', 1, payload) - - -def test_clear_idmap_cache(do_ad_connection): - call('idmap.clear_idmap_cache', job=True) - - -def test_idmap_overlap_fail(do_ad_connection): - """ - It should not be possible to set an idmap range for a new - domain that overlaps an existing one. - """ - assert_ad_healthy() - payload = { - "name": "canary", - "range_low": "20000", - "range_high": "2000000000", - "idmap_backend": "RID", - "options": {} - } - with pytest.raises(ValidationErrors): - call('idmap.create', payload) - - -def test_idmap_default_domain_name_change_fail(): - """ - It should not be possible to change the name of a - default idmap domain. - """ - assert_ad_healthy() - payload = { - "name": "canary", - "range_low": "1000000000", - "range_high": "2000000000", - "idmap_backend": "RID", - "options": {} - } - with pytest.raises(ValidationErrors): - call('idmap.create', payload) - - -def test_idmap_low_high_range_inversion_fail(request): - """ - It should not be possible to set an idmap low range - that is greater than its high range. - """ - assert_ad_healthy() - payload = { - "name": "canary", - "range_low": "2000000000", - "range_high": "1900000000", - "idmap_backend": "RID", - } - with pytest.raises(ValidationErrors): - call('idmap.create', payload) - - -def test_idmap_new_domain_duplicate_fail(idmap_domain): - """ - It should not be possible to create a new domain that - has a name conflict with an existing one. - """ - low, high = call('idmap.get_next_idmap_range') - payload = { - "name": idmap_domain["name"], - "range_low": low, - "range_high": high, - "idmap_backend": "RID", - } - with pytest.raises(ValidationErrors): - call('idmap.create', payload) - - -def test_idmap_new_domain_autorid_fail(idmap_domain): - """ - It should only be possible to set AUTORID on - default domain. 
- """ - payload = { - "idmap_backend": "AUTORID", - } - with pytest.raises(ValidationErrors): - call('idmap.update', idmap_domain['id'], payload) diff --git a/tests/api2/test_040_ad_user_group_cache.py b/tests/api2/test_040_ad_user_group_cache.py deleted file mode 100644 index 00dbcb17c8f54..0000000000000 --- a/tests/api2/test_040_ad_user_group_cache.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 - -import errno -import pytest -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import SSH_TEST -from auto_config import password, user -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.directory_service import active_directory -from middlewared.test.integration.utils import call - - -WINBIND_SEPARATOR = "\\" - - -@pytest.fixture(scope="module") -def do_ad_connection(request): - with active_directory() as ad: - # make sure we are extra sure cache fill complete - cache_fill_job = call( - 'core.get_jobs', - [['method', '=', 'directoryservices.cache.refresh_impl']], - {'order_by': ['-id'], 'get': True} - ) - if cache_fill_job['state'] == 'RUNNING': - call('core.job_wait', cache_fill_job['id'], job=True) - - users = [x['username'] for x in call( - 'user.query', [['local', '=', False]], - )] - - set_users = set(users) - assert len(set_users) == len(users) - - groups = [x['name'] for x in call( - 'group.query', [['local', '=', False]], - )] - - set_groups = set(groups) - assert len(set_groups) == len(groups) - - yield ad | {'users': set_users, 'groups': set_groups} - - -def get_ad_user_and_group(ad_connection): - WORKGROUP = ad_connection['dc_info']['Pre-Win2k Domain'] - - domain_prefix = f'{WORKGROUP.upper()}{WINBIND_SEPARATOR}' - ad_user = ad_connection['user_obj']['pw_name'] - ad_group = f'{domain_prefix}domain users' - - user = call( - 'user.query', [['username', '=', ad_user]], - {'get': True} - ) - - group = call( - 'group.query', [['name', '=', ad_group]], - {'get': True} - ) - - return (user, group) - - -def test_check_for_ad_users(do_ad_connection): - """ - This test validates that wbinfo -u output matches entries - we get through user.query - """ - cmd = "wbinfo -u" - results = SSH_TEST(cmd, user, password) - assert results['result'], str(results['output']) - wbinfo_entries = set(results['stdout'].splitlines()) - - assert wbinfo_entries == do_ad_connection['users'] - - -def test_check_for_ad_groups(do_ad_connection): - """ - This test validates that wbinfo -g output matches entries - we get through group.query - """ - cmd = "wbinfo -g" - results = SSH_TEST(cmd, user, password) - assert results['result'], str(results['output']) - wbinfo_entries = set(results['stdout'].splitlines()) - - assert wbinfo_entries == do_ad_connection['groups'] - - -def test_check_directoryservices_cache_refresh(do_ad_connection): - """ - This test validates that middleware can successfully rebuild the - directory services cache from scratch using the public API. - - This currently happens once per 24 hours. Result of failure here will - be lack of users/groups visible in webui. - """ - - # Cache resides in tdb files. Remove the files to clear cache. - cmd = 'rm -f /root/tdb/persistent/*' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - # directoryservices.cache_refresh job causes us to rebuild / refresh LDAP / AD users. 
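- # (Roughly speaking, the refresh re-enumerates the domain's users and groups
- # through winbind and repopulates the tdb-backed cache files removed above, so
- # the user.query / group.query results below should line up with the wbinfo
- # output checked earlier; description inferred from the surrounding tests.)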
- call('directoryservices.cache.refresh_impl', job=True) - - users = set([x['username'] for x in call( - 'user.query', [['local', '=', False]] - )]) - - assert users == do_ad_connection['users'] - - groups = set([x['name'] for x in call( - 'group.query', [['local', '=', False]], - )]) - - assert groups == do_ad_connection['groups'] - - -def test_check_lazy_initialization_of_users_and_groups_by_name(do_ad_connection): - """ - When users explicitly search for a directory service or other user - by name or id we should hit pwd and grp modules and synthesize a - result if the user / group is not in the cache. This special behavior - only occurs when single filter of "name =" or "id =". So after the - initial query that should result in insertion, we add a second filter - to only hit the cache. Code paths are slightly different for lookups - by id or by name and so they are tested separately. - """ - - cmd = 'rm -f /root/tdb/persistent/*' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - ad_user, ad_group = get_ad_user_and_group(do_ad_connection) - - assert ad_user['id_type_both'] is True - assert ad_user['immutable'] is True - assert ad_user['local'] is False - assert ad_group['id_type_both'] is True - assert ad_group['local'] is False - - cache_names = set([x['username'] for x in call( - 'user.query', [['local', '=', False]], - )]) - - assert cache_names == {ad_user['username']} - - cache_names = set([x['name'] for x in call( - 'group.query', [['local', '=', False]], - )]) - - assert cache_names == {ad_group['name']} - - -def test_check_lazy_initialization_of_users_and_groups_by_id(do_ad_connection): - """ - When users explicitly search for a directory service or other user - by name or id we should hit pwd and grp modules and synthesize a - result if the user / group is not in the cache. This special behavior - only occurs when single filter of "name =" or "id =". So after the - initial query that should result in insertion, we add a second filter - to only hit the cache. Code paths are slightly different for lookups - by id or by name and so they are tested separately. 
- """ - - ad_user, ad_group = get_ad_user_and_group(do_ad_connection) - - cmd = 'rm -f /root/tdb/persistent/*' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - call('user.query', [['uid', '=', ad_user['uid']]], {'get': True}) - - call('group.query', [['gid', '=', ad_group['gid']]], {'get': True}) - - cache_names = set([x['username'] for x in call( - 'user.query', [['local', '=', False]], - )]) - - assert cache_names == {ad_user['username']} - - cache_names = set([x['name'] for x in call( - 'group.query', [['local', '=', False]], - )]) - - assert cache_names == {ad_group['name']} - -@pytest.mark.parametrize('op_type', ('UPDATE', 'DELETE')) -def test_update_delete_failures(do_ad_connection, op_type): - ad_user, ad_group = get_ad_user_and_group(do_ad_connection) - - for acct, prefix in ((ad_user, 'user'), (ad_group, 'group')): - with pytest.raises(CallError) as ce: - if op_type == 'UPDATE': - call(f'{prefix}.update', acct['id'], {'smb': False}) - else: - call(f'{prefix}.delete', acct['id']) - - assert ce.value.errno == errno.EPERM diff --git a/tests/api2/test_050_alert.py b/tests/api2/test_050_alert.py deleted file mode 100644 index 0ac59191c27ee..0000000000000 --- a/tests/api2/test_050_alert.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import os -import sys -from pytest_dependency import depends -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST, SSH_TEST -from auto_config import password, user, pool_name -from middlewared.test.integration.utils import call - - - -def test_01_get_alert_list(): - results = GET("/alert/list/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - - -def test_02_get_alert_list_categories(): - results = GET("/alert/list_categories/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - assert results.json(), results.json() - - -def test_03_get_alert_list_policies(): - results = GET("/alert/list_policies/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - assert results.json(), results.json() - - -@pytest.mark.dependency(name='degrade_pool') -def test_04_degrading_a_pool_to_create_an_alert(request): - global gptid - get_pool = GET(f"/pool/?name={pool_name}").json()[0] - id_path = '/dev/disk/by-partuuid/' - gptid = get_pool['topology']['data'][0]['path'].replace(id_path, '') - cmd = f'zinject -d {gptid} -A fault {pool_name}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_05_verify_the_pool_is_degraded(request): - depends(request, ['degrade_pool'], scope="session") - cmd = f'zpool status {pool_name} | grep {gptid}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - assert 'DEGRADED' in results['output'], results['output'] - - -@pytest.mark.timeout(120) -def test_06_wait_for_the_alert_and_get_the_id(request): - depends(request, ["degrade_pool"], scope="session") - global alert_id - call("alert.process_alerts") - while True: - for line in GET("/alert/list/").json(): - if ( - line['source'] == 'VolumeStatus' and - line['args']['volume'] == pool_name and - line['args']['state'] == 'DEGRADED' - ): - alert_id = line['id'] - return - - sleep(1) - - -def test_08_dimiss_the_alert(request): - depends(request, ["degrade_pool"], scope="session") - results = 
POST("/alert/dismiss/", alert_id) - assert results.status_code == 200, results.text - assert isinstance(results.json(), type(None)), results.text - - -def test_09_verify_the_alert_is_dismissed(request): - depends(request, ["degrade_pool"], scope="session") - results = GET("/alert/list/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - for line in results.json(): - if line['id'] == alert_id: - assert line['dismissed'] is True, results.text - break - - -def test_10_restore_the_alert(request): - depends(request, ["degrade_pool"], scope="session") - results = POST("/alert/restore/", alert_id) - assert results.status_code == 200, results.text - assert isinstance(results.json(), type(None)), results.text - - -def test_11_verify_the_alert_is_restored(request): - depends(request, ["degrade_pool"], scope="session") - results = GET(f"/alert/list/?id={alert_id}") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - for line in results.json(): - if line['id'] == alert_id: - assert line['dismissed'] is False, results.text - break - - -def test_12_clear_the_pool_degradation(request): - depends(request, ["degrade_pool"], scope="session") - cmd = f'zpool clear {pool_name}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_13_verify_the_pool_is_not_degraded(request): - depends(request, ["degrade_pool"], scope="session") - cmd = f'zpool status {pool_name} | grep {gptid}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - assert 'DEGRADED' not in results['output'], results['output'] - - -@pytest.mark.timeout(120) -def test_14_wait_for_the_alert_to_disappear(request): - depends(request, ["degrade_pool"], scope="session") - while True: - if alert_id not in GET("/alert/list/").text: - assert True - break - sleep(1) diff --git a/tests/api2/test_070_alertservice.py b/tests/api2/test_070_alertservice.py deleted file mode 100644 index b4cfe2941b6a3..0000000000000 --- a/tests/api2/test_070_alertservice.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import os -import sys -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST, PUT, DELETE - - -def test_01_get_alertservice(): - results = GET("/alertservice/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - - -def test_02_get_alertservice_list_types(): - results = GET("/alertservice/list_types/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - assert results.json(), results.text - - -def test_03_create_an_alertservice(): - global alertservice_id, payload, results - payload = { - "name": "Critical Email Test", - "type": "Mail", - "attributes": { - "email": "eric.spam@ixsystems.com" - }, - "level": "CRITICAL", - "enabled": True - } - results = POST("/alertservice/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - alertservice_id = results.json()['id'] - - -@pytest.mark.parametrize('data', ["name", "type", "attributes", "level", "enabled"]) -def test_04_verify_the_alertservice_creation_results(data): - assert results.json()[data] == payload[data], results.text - - -def test_05_get_alertservice_with_id(): - global results - results = GET(f"/alertservice/id/{alertservice_id}") - assert results.status_code == 200, 
results.text - assert isinstance(results.json(), dict), results.text - - -@pytest.mark.parametrize('data', ["name", "type", "attributes", "level", "enabled"]) -def test_06_verify_the_id_alertservice_results(data): - assert results.json()[data] == payload[data], results.text - - -def test_07_change_config_to_alertservice_id(): - global alertservice_id, payload, results - payload = { - "name": "Warning Email Test", - "type": "Mail", - "attributes": { - "email": "william.spam@ixsystems.com@" - }, - "level": "WARNING", - "enabled": False - } - results = PUT(f"/alertservice/id/{alertservice_id}", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - - -@pytest.mark.parametrize('data', ["name", "type", "attributes", "level", "enabled"]) -def test_08_verify_the_alertservice_changes_results(data): - assert results.json()[data] == payload[data], results.text - - -def test_09_get_alertservice_changes_with_id(): - global results - results = GET(f"/alertservice/id/{alertservice_id}") - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - - -@pytest.mark.parametrize('data', ["name", "type", "attributes", "level", "enabled"]) -def test_10_verify_the_id_alertservice_changes_results(data): - assert results.json()[data] == payload[data], results.text - - -def test_11_delete_alertservice(): - results = DELETE(f"/alertservice/id/{alertservice_id}") - assert results.status_code == 200, results.text - - -def test_12_verify_alertservice_is_delete(): - results = GET(f"/alertservice/id/{alertservice_id}") - assert results.status_code == 404, results.text diff --git a/tests/api2/test_090_boot.py b/tests/api2/test_090_boot.py deleted file mode 100644 index 989eb78026ffa..0000000000000 --- a/tests/api2/test_090_boot.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import pytest -import sys -import os -from time import time, sleep -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET - - -@pytest.mark.dependency(name='BOOT_DISKS') -def test_01_get_boot_disks(): - results = GET('/boot/get_disks/') - assert results.status_code == 200, results.text - disks = results.json() - assert isinstance(disks, list) is True, results.text - assert disks, results.text - - -@pytest.mark.dependency(name='BOOT_STATE') -def test_02_get_boot_state(request): - depends(request, ['BOOT_DISKS']) - results = GET('/boot/get_state/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict) is True, results.text - global boot_state - boot_state = results.json() - - -@pytest.mark.dependency(name='BOOT_SCRUB') -def test_03_get_boot_scrub(request): - depends(request, ['BOOT_STATE']) - global JOB_ID - results = GET('/boot/scrub/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), int) is True, results.text - JOB_ID = results.json() - - -def test_04_verify_boot_scrub_job(request): - depends(request, ['BOOT_SCRUB']) - stop_time = time() + 600 - while True: - get_job = GET(f'/core/get_jobs/?id={JOB_ID}') - job_status = get_job.json()[0] - if job_status['state'] in ('RUNNING', 'WAITING'): - if stop_time <= time(): - assert False, "Job Timeout\n\n" + get_job.text - break - sleep(5) - else: - assert job_status['state'] == 'SUCCESS', get_job.text - break diff --git a/tests/api2/test_100_bootenv.py b/tests/api2/test_100_bootenv.py deleted file mode 100644 
index 8202125574f6e..0000000000000 --- a/tests/api2/test_100_bootenv.py +++ /dev/null @@ -1,107 +0,0 @@ -import sys -import os -from time import sleep -from unittest.mock import ANY - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import POST, DELETE, GET, PUT, wait_on_job - - -def test_01_get_the_activated_bootenv(): - global active_be_id - results = GET('/bootenv/?activated=True') - assert results.status_code == 200, results.text - active_be_id = results.json()[0]['id'] - - -def test_02_create_be_duplicate_name(): - payload = {"name": active_be_id, "source": active_be_id} - results = POST("/bootenv/", payload) - assert results.status_code == 422, results.text - assert results.json() == {"bootenv_create.name": ANY} - - -def test_02_creating_a_new_boot_environment_from_the_active_boot_environment(): - payload = {"name": "bootenv01", "source": active_be_id} - results = POST("/bootenv/", payload) - assert results.status_code == 200, results.text - sleep(1) - - -def test_03_look_new_bootenv_is_created(): - assert len(GET('/bootenv?name=bootenv01').json()) == 1 - - -def test_04_activate_bootenv01(): - results = POST("/bootenv/id/bootenv01/activate/", None) - assert results.status_code == 200, results.text - - -# Update tests -def test_05_cloning_a_new_boot_environment(): - payload = {"name": "bootenv02", "source": "bootenv01"} - results = POST("/bootenv/", payload) - assert results.status_code == 200, results.text - sleep(1) - - -def test_06_activate_bootenv02(): - payload = None - results = POST("/bootenv/id/bootenv02/activate/", payload) - assert results.status_code == 200, results.text - - -def test_07_change_boot_environment_name(): - payload = {"name": "bootenv03"} - results = PUT("/bootenv/id/bootenv01/", payload) - assert results.status_code == 200, results.text - - -def test_08_set_keep_attribute_true(): - payload = {"keep": True} - results = POST("/bootenv/id/bootenv03/set_attribute/", payload) - assert results.status_code == 200, results.text - - -def test_09_activate_bootenv03(): - payload = None - results = POST("/bootenv/id/bootenv03/activate/", payload) - assert results.status_code == 200, results.text - - -# Delete tests -def test_10_removing_a_boot_environment_02(): - global job_id - results = DELETE("/bootenv/id/bootenv02/") - assert results.status_code == 200, results.text - job_id = results.json() - - -def test_11_verify_the_removing_be_job_is_successful(request): - job_status = wait_on_job(job_id, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_12_set_keep_attribute_false(): - payload = {"keep": False} - results = POST("/bootenv/id/bootenv03/set_attribute/", payload) - assert results.status_code == 200, results.text - - -def test_13_activate_default(): - payload = None - results = POST(f"/bootenv/id/{active_be_id}/activate/", payload) - assert results.status_code == 200, results.text - - -def test_14_removing_a_boot_environment_03(): - global job_id - results = DELETE("/bootenv/id/bootenv03/") - assert results.status_code == 200, results.text - job_id = results.json() - - -def test_15_verify_the_removing_be_job_is_successful(request): - job_status = wait_on_job(job_id, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) diff --git a/tests/api2/test_110_certificate.py b/tests/api2/test_110_certificate.py deleted file mode 100644 index 5dc5872cd6085..0000000000000 --- a/tests/api2/test_110_certificate.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 - - # Author: Eric Turgeon - #
License: BSD - -import pytest -import re -import sys -import os - -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, DELETE, POST - -try: - from config import ( - LDAPBASEDN, - LDAPBINDDN, - LDAPBINDPASSWORD, - LDAPHOSTNAME, - ) -except ImportError: - Reason = 'LDAP* variables are not set up in config.py' - # comment pytestmark for development testing with --dev-test - pytestmark = pytest.mark.skipif(True, reason=Reason) - - -def test_01_get_certificate_query(): - results = GET('/certificate/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - - -def test_create_idmap_certificate(): - global certificate_id, idmap_id - payload = { - 'name': 'BOB', - 'range_low': 1000, - 'range_high': 2000, - 'certificate': 1, - "idmap_backend": "RFC2307", - 'options': { - "ldap_server": "STANDALONE", - "bind_path_user": LDAPBASEDN, - "bind_path_group": LDAPBASEDN, - "ldap_url": LDAPHOSTNAME, - "ldap_user_dn": LDAPBINDDN, - "ldap_user_dn_password": LDAPBINDPASSWORD, - "ssl": "ON", - "ldap_realm": False, - } - } - results = POST('/idmap/', payload) - assert results.status_code == 200, results.text - idmap_id = results.json()['id'] - certificate_id = results.json()['certificate']['id'] - - -def test_02_delete_used_certificate(): - global job_id - results = DELETE(f'/certificate/id/{certificate_id}/', True) - assert results.status_code == 200, results.text - job_id = int(results.text) - - -def test_03_verify_certificate_delete_failed(): - while True: - get_job = GET(f'/core/get_jobs/?id={job_id}') - assert get_job.status_code == 200, get_job.text - job_status = get_job.json()[0] - if job_status['state'] in ('RUNNING', 'WAITING'): - sleep(5) - else: - assert job_status['state'] == 'FAILED', get_job.text - assert bool(re.search( - r'Certificate is being used by following service.*IDMAP', job_status['error'], flags=re.DOTALL - )) is True, job_status['error'] - break - - -def test_04_delete_idmap(): - results = DELETE(f'/idmap/id/{idmap_id}/') - assert results.status_code == 200, results.text diff --git a/tests/api2/test_120_certificateauthority.py b/tests/api2/test_120_certificateauthority.py deleted file mode 100644 index e0637c352fea3..0000000000000 --- a/tests/api2/test_120_certificateauthority.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import pytest -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET - - -def test_01_get_certificateauthority_query(): - results = GET('/certificateauthority/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text diff --git a/tests/api2/test_140_core.py b/tests/api2/test_140_core.py deleted file mode 100644 index c32d3a4b49f33..0000000000000 --- a/tests/api2/test_140_core.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import pytest -import sys -import os -from urllib.request import urlretrieve -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST -from middlewared.test.integration.utils.client import truenas_server - - -def test_01_get_core_jobs(): - results = GET('/core/get_jobs/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), list) is True - - -def test_02_get_core_ping(): - results = GET('/core/ping/') - assert results.status_code == 200, results.text - assert
isinstance(results.json(), str) is True - assert results.json() == 'pong' - - -def test_03_get_download_info_for_config_dot_save(): - payload = { - 'method': 'config.save', - 'args': [], - 'filename': 'freenas.db' - } - results = POST('/core/download/', payload) - - assert results.status_code == 200, results.text - assert isinstance(results.json(), list) is True, results.text - global url - url = results.json()[1] - global job_id - job_id = results.json()[0] - - -def test_04_verify_job_id_state_is_running(): - results = GET(f'/core/get_jobs/?id={job_id}') - assert results.json()[0]['state'] == 'RUNNING', results.text - - -def test_05_download_from_url(): - rv = urlretrieve(f'http://{truenas_server.ip}{url}') - stat = os.stat(rv[0]) - assert stat.st_size > 0 - - -def test_06_verify_job_id_state_is_success(): - results = GET(f'/core/get_jobs/?id={job_id}') - assert results.json()[0]['state'] == 'SUCCESS', results.text diff --git a/tests/api2/test_150_cronjob.py b/tests/api2/test_150_cronjob.py deleted file mode 100644 index 1bfaafda92993..0000000000000 --- a/tests/api2/test_150_cronjob.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - -import sys -import os -import pytest -from time import sleep -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import POST, PUT, SSH_TEST, GET, DELETE -from auto_config import user, password - -TESTFILE = '/tmp/.testFileCreatedViaCronjob' - - -@pytest.fixture(scope='module') -def cronjob_dict(): - return {} - - -def test_01_Creating_new_cron_job_which_will_run_every_minute(cronjob_dict): - results = POST('/cronjob/', { - 'user': 'root', - 'command': f'touch "{TESTFILE}"', - 'schedule': {'minute': '*/1'} - }) - assert results.status_code == 200, results.text - cronjob_dict.update(results.json()) - assert isinstance(cronjob_dict['id'], int) is True - - -def test_02_Checking_to_see_if_cronjob_was_created_and_enabled(cronjob_dict): - id = cronjob_dict['id'] - results = GET(f'/cronjob?id={id}') - assert results.json()[0]['enabled'] is True - - -def test_03_Wait_a_minute(): - sleep(65) - - -def test_04_Updating_cronjob_status_to_disabled_updating_command(cronjob_dict): - id = cronjob_dict['id'] - results = PUT(f'/cronjob/id/{id}/', { - 'enabled': False - }) - assert results.status_code == 200, results.text - - -def test_05_Checking_that_API_reports_the_cronjob_as_updated(cronjob_dict): - id = cronjob_dict['id'] - results = GET(f'/cronjob?id={id}') - assert results.json()[0]['enabled'] is False - - -def test_06_Deleting_test_file_created_by_cronjob(request): - results = SSH_TEST(f'rm "{TESTFILE}"', user, password) - assert results['result'] is True, results['output'] - - -def test_07_Deleting_cron_job_which_will_run_every_minute(cronjob_dict): - id = cronjob_dict['id'] - results = DELETE(f'/cronjob/id/{id}/', None) - assert results.status_code == 200, results.text - - -def test_08_Check_that_the_API_reports_the_cronjob_as_deleted(cronjob_dict): - id = cronjob_dict['id'] - results = GET(f'/cronjob?id={id}') - assert results.json() == [], results.text diff --git a/tests/api2/test_190_filesystem.py b/tests/api2/test_190_filesystem.py deleted file mode 100644 index 15c391e721915..0000000000000 --- a/tests/api2/test_190_filesystem.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import errno -import pytest -import stat -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - -from copy import deepcopy -from 
functions import POST, PUT, SSH_TEST, wait_on_job -from auto_config import pool_name, user, password -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.filesystem import directory -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from utils import create_dataset - -group = 'root' -path = '/etc' -path_list = ['default', 'kernel', 'zfs', 'ssh'] -random_path = ['/boot/grub', '/root', '/bin', '/usr/bin'] - - -@pytest.mark.parametrize('path', random_path) -def test_03_get_filesystem_stat_(path): - results = POST('/filesystem/stat/', path) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict) is True, results.text - assert isinstance(results.json()['size'], int) is True, results.text - assert isinstance(results.json()['mode'], int) is True, results.text - assert results.json()['uid'] == 0, results.text - assert results.json()['gid'] == 0, results.text - assert isinstance(results.json()['atime'], float) is True, results.text - assert isinstance(results.json()['mtime'], float) is True, results.text - assert isinstance(results.json()['ctime'], float) is True, results.text - assert isinstance(results.json()['dev'], int) is True, results.text - assert isinstance(results.json()['inode'], int) is True, results.text - assert results.json()['nlink'] in tuple(range(10)), results.text - assert results.json()['user'] == 'root', results.text - assert results.json()['group'] == group, results.text - assert results.json()['acl'] is False, results.text - - -def test_04_test_filesystem_statfs_fstype(request): - # test zfs fstype first - parent_path = f'/mnt/{pool_name}' - data = call('filesystem.statfs', parent_path) - assert data['fstype'] == 'zfs', data['fstype'] - - # mount nested tmpfs entry and make sure statfs - # returns `tmpfs` as the fstype - # mkdir - nested_path = f'{parent_path}/tmpfs' - cmd1 = f'mkdir -p {nested_path}' - results = SSH_TEST(cmd1, user, password) - assert results['result'] is True, results['output'] - - # mount tmpfs - cmd2 = f'mount -t tmpfs -o size=10M tmpfstest {nested_path}' - results = SSH_TEST(cmd2, user, password) - assert results['result'] is True, results['output'] - - # test fstype - data = call('filesystem.statfs', nested_path) - assert data['fstype'] == 'tmpfs', data['fstype'] - - # cleanup - cmd3 = f'umount {nested_path}' - results = SSH_TEST(cmd3, user, password) - assert results['result'] is True, results['output'] - cmd4 = f'rmdir {nested_path}' - results = SSH_TEST(cmd4, user, password) - assert results['result'] is True, results['output'] - - -def test_05_set_immutable_flag_on_path(request): - t_path = os.path.join('/mnt', pool_name, 'random_directory_immutable') - t_child_path = os.path.join(t_path, 'child') - - with directory(t_path) as d: - for flag_set in (True, False): - call('filesystem.set_immutable', flag_set, d) - # We test 2 things - # 1) Writing content to the parent path fails/succeeds based on "set" - # 2) "is_immutable_set" returns sane response - if flag_set: - with pytest.raises(PermissionError): - call('filesystem.mkdir', f'{t_child_path}_{flag_set}') - else: - call('filesystem.mkdir', f'{t_child_path}_{flag_set}') - - is_immutable = call('filesystem.is_immutable', t_path) - assert is_immutable == flag_set, 'Immutable flag is still not set' if flag_set else 'Immutable flag is still set' - - -def test_06_test_filesystem_listdir_exclude_non_mounts(): - # create a random directory at top-level of '/mnt' 
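-    # filesystem.listdir() is expected to report only dataset mountpoints at the top of /mnt, -    # so a plain directory must be filtered out of the listing. Hypothetical layout (pool name -    # assumed) illustrating what the assertion below checks: -    #   /mnt/tank         <- dataset mountpoint: listed -    #   /mnt/random_dir   <- plain directory: must not be listed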
- mnt = '/mnt/' - randir = 'random_dir' - path = mnt + randir - - with directory(path) as _: - # now call filesystem.listdir specifying '/mnt' as path - # and ensure `randir` is not in the output - results = POST('/filesystem/listdir/', {'path': mnt}) - assert results.status_code == 200, results.text - assert not any(i['name'] == randir for i in results.json()), f'{randir} should not be listed' - - -def test_07_test_filesystem_stat_filetype(request): - """ - This test checks that file types are properly - identified through the filesystem plugin in middleware. - There is an additional check to make sure that paths - in the ZFS CTL directory (.zfs) are properly flagged. - """ - ds_name = 'stat_test' - snap_name = f'{ds_name}_snap1' - path = f'/mnt/{pool_name}/{ds_name}' - targets = ['file', 'directory', 'symlink', 'other'] - cmds = [ - f'mkdir {path}/directory', - f'touch {path}/file', - f'ln -s {path}/file {path}/symlink', - f'mkfifo {path}/other' - ] - - with create_dataset(f'{pool_name}/{ds_name}'): - results = SSH_TEST(' && '.join(cmds), user, password) - assert results['result'] is True, str(results) - - for x in targets: - target = f'{path}/{x}' - statout = call('filesystem.stat', target) - assert statout['type'] == x.upper(), str(statout) - assert not statout['is_ctldir'] - - call('zfs.snapshot.create', { - 'dataset': f'{pool_name}/{ds_name}', - 'name': snap_name, - 'recursive': False, - }) - for x in targets: - target = f'{path}/.zfs/snapshot/{snap_name}/{x}' - statout = call('filesystem.stat', target) - assert statout['type'] == x.upper(), str(statout) - assert statout['is_ctldir'] - - assert call('filesystem.stat', f'{path}/.zfs/snapshot/{snap_name}')['is_ctldir'] - assert all(dirent['is_ctldir'] for dirent in call('filesystem.listdir', f'{path}/.zfs/snapshot')) - assert call('filesystem.stat', f'{path}/.zfs/snapshot')['is_ctldir'] - assert all(dirent['is_ctldir'] for dirent in call('filesystem.listdir', f'{path}/.zfs')) - assert call('filesystem.stat', f'{path}/.zfs')['is_ctldir'] - - -def test_08_test_filesystem_statfs_flags(request): - ''' - This test verifies that changing ZFS properties via - middleware causes mountinfo changes visible via statfs.
- """ - ds_name = 'statfs_test' - target = f'{pool_name}/{ds_name}' - target_url = target.replace('/', '%2F') - path = f'/mnt/{target}' - - # tuple: ZFS property name, property value, mountinfo value - properties = [ - ("readonly", "ON", "RO"), - ("readonly", "OFF", "RW"), - ("atime", "OFF", "NOATIME"), - ("exec", "OFF", "NOEXEC"), - ("acltype", "NFSV4", "NFS4ACL"), - ("acltype", "POSIX", "POSIXACL"), - ] - - with create_dataset(target): - for p in properties: - # set option we're checking and make sure it's really set - payload = { - p[0]: p[1] - } - if p[0] == 'acltype': - payload.update({ - 'aclmode': 'RESTRICTED' if p[1] == 'NFSV4' else 'DISCARD' - }) - results = PUT(f'/pool/dataset/id/{target_url}', payload) - assert results.status_code == 200, results.text - prop_out = results.json()[p[0]] - assert prop_out['value'] == p[1] - - # check statfs results - results = POST('/filesystem/statfs/', path) - assert results.status_code == 200, results.text - - mount_flags = results.json()['flags'] - assert p[2] in mount_flags, f'{path}: ({p[2]}) not in {mount_flags}' - - -def test_09_test_dosmodes(): - modes = ['readonly', 'hidden', 'system', 'archive', 'offline', 'sparse'] - ds_name = 'dosmode_test' - target = f'{pool_name}/{ds_name}' - path = f'/mnt/{target}' - testpaths = [ - f'{path}/testfile', - f'{path}/testdir', - ] - - with create_dataset(target): - cmd = [ - f'touch {testpaths[0]}', - f'mkdir {testpaths[1]}' - ] - results = SSH_TEST(' && '.join(cmd), user, password) - assert results['result'] is True, str(results) - - for p in testpaths: - expected_flags = call('filesystem.get_zfs_attributes', p) - for m in modes: - to_set = {m: not expected_flags[m]} - res = call('filesystem.set_zfs_attributes', {'path': p, 'zfs_file_attributes': to_set}) - expected_flags.update(to_set) - assert expected_flags == res - - res = call('filesystem.get_zfs_attributes', p) - assert expected_flags == res - - -def test_10_acl_path_execute_validation(): - ds_name = 'acl_execute_test' - target = f'{pool_name}/{ds_name}' - path = f'/mnt/{target}' - - NFSV4_DACL = [ - {'tag': 'owner@', 'id': -1, 'type': 'ALLOW', 'perms': {'BASIC': 'FULL_CONTROL'}, 'flags': {'BASIC': 'INHERIT'}}, - {'tag': 'group@', 'id': -1, 'type': 'ALLOW', 'perms': {'BASIC': 'FULL_CONTROL'}, 'flags': {'BASIC': 'INHERIT'}}, - {'tag': 'USER', 'id': 65534, 'type': 'ALLOW', 'perms': {'BASIC': 'FULL_CONTROL'}, 'flags': {'BASIC': 'INHERIT'}}, - {'tag': 'GROUP', 'id': 65534, 'type': 'ALLOW', 'perms': {'BASIC': 'FULL_CONTROL'}, 'flags': {'BASIC': 'INHERIT'}}, - ] - - # Do NFSv4 checks - with create_dataset(target, {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'}, None, 770): - sub_ds_name = f'{ds_name}/sub' - sub_target = f'{pool_name}/{sub_ds_name}' - sub_path = f'/mnt/{sub_target}' - - """ - For NFSv4 ACLs four different tags generate user tokens differently: - 1) owner@ tag will test `uid` from payload - 2) group@ tag will test `gid` from payload - 3) GROUP will test the `id` in payload with id_type - 4) USER will test the `id` in mayload with USER id_type - """ - - # Start with testing denials - with create_dataset(sub_target, {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'}): - acl = deepcopy(NFSV4_DACL) - names = ['daemon', 'apps', 'nobody', 'nogroup'] - for idx, entry in enumerate(NFSV4_DACL): - perm_job = POST('/filesystem/setacl/', - {'path': sub_path, "dacl": acl, 'uid': 1, 'gid': 568}) - assert perm_job.status_code == 200, perm_job.text - - job_status = wait_on_job(perm_job.json(), 180) - - # all of these tests should fail - assert 
job_status['state'] == 'FAILED', str(job_status['results']) - assert names[idx] in job_status['results']['error'], job_status['results']['error'] - acl.pop(0) - - # when this test starts, we have 770 perms on parent - for entry in NFSV4_DACL: - # first set permissions on parent dataset - if entry['tag'] == 'owner@': - perm_job = POST('/filesystem/chown/', { - 'path': path, - 'uid': 1, - 'gid': 0 - }) - elif entry['tag'] == 'group@': - perm_job = POST('/filesystem/chown/', { - 'path': path, - 'uid': 0, - 'gid': 568 - }) - elif entry['tag'] == 'USER': - perm_job = POST('/filesystem/setacl/', { - 'path': path, - 'uid': 0, - 'gid': 0, - 'dacl': [entry] - }) - elif entry['tag'] == 'GROUP': - perm_job = POST('/filesystem/setacl/', { - 'path': path, - 'uid': 0, - 'gid': 0, - 'dacl': [entry] - }) - - assert perm_job.status_code == 200, perm_job.text - job_status = wait_on_job(perm_job.json(), 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - # Now set the acl on child dataset. This should succeed - perm_job = POST('/filesystem/setacl/', { - 'path': sub_path, - 'uid': 1, - 'gid': 568, - 'dacl': [entry] - }) - job_status = wait_on_job(perm_job.json(), 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -@pytest.fixture(scope="module") -def file_and_directory(): - with dataset("test_file_and_directory") as ds: - ssh(f"mkdir /mnt/{ds}/test-directory") - ssh(f"touch /mnt/{ds}/test-file") - yield ds - - -@pytest.mark.parametrize("query,result", [ - ([], {"test-directory", "test-file"}), - ([["type", "=", "DIRECTORY"]], {"test-directory"}), - ([["type", "!=", "DIRECTORY"]], {"test-file"}), - ([["type", "=", "FILE"]], {"test-file"}), - ([["type", "!=", "FILE"]], {"test-directory"}), -]) -def test_type_filter(file_and_directory, query, result): - listdir = call("filesystem.listdir", f"/mnt/{file_and_directory}", query) - assert {item["name"] for item in listdir} == result, listdir - - -def test_mkdir_mode(): - with dataset("test_mkdir_mode") as ds: - testdir = os.path.join("/mnt", ds, "testdir") - call("filesystem.mkdir", {'path': testdir, 'options': {'mode': '777'}}) - st = call("filesystem.stat", testdir) - assert stat.S_IMODE(st["mode"]) == 0o777 - - -def test_mkdir_chmod_failure(): - with dataset("test_mkdir_chmod", {"share_type": "SMB"}) as ds: - testdir = os.path.join("/mnt", ds, "testdir") - with pytest.raises(PermissionError): - call("filesystem.mkdir", {'path': testdir, 'options': {'mode': '777'}}) - - with pytest.raises(CallError) as ce: - call("filesystem.stat", testdir) - - assert ce.value.errno == errno.ENOENT - - mkdir_st = call("filesystem.mkdir", {'path': testdir, 'options': {'mode': '777', 'raise_chmod_error': False}}) - - st = call("filesystem.stat", testdir) - # Verify that mode output returned from mkdir matches what was actually set - assert st['mode'] == mkdir_st['mode'] - - # mkdir succeeded, but chmod failed so we get mode based on inherited ACL (SMB preset) - assert stat.S_IMODE(st["mode"]) == 0o770 diff --git a/tests/api2/test_200_ftp.py b/tests/api2/test_200_ftp.py deleted file mode 100644 index 8c650206a56cc..0000000000000 --- a/tests/api2/test_200_ftp.py +++ /dev/null @@ -1,1404 +0,0 @@ -import contextlib -import copy -import json -import os -import subprocess -from ftplib import all_errors, error_temp -from time import sleep -from timeit import default_timer as timer -from types import SimpleNamespace - -import pytest -from pytest_dependency import depends - -from assets.websocket.server import reboot -from 
middlewared.test.integration.assets.account import user as ftp_user -from middlewared.test.integration.assets.pool import dataset as dataset_asset -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server - -from auto_config import password, pool_name, user -from functions import SSH_TEST, send_file -from protocols import ftp_connect, ftp_connection, ftps_connection - -FTP_DEFAULT = {} -DB_DFLT = {} -INIT_DIRS_AND_FILES = { - 'path': None, - 'dirs': [ - {'name': 'init_dir'}, - {'name': 'init_ro_dir', 'perm': '-w', - 'contents': ["ReadOnlyDir_file1", "ReadOnlyDir_file2"]} - ], - 'files': [{'name': 'init_file', 'contents': "Contents of init_file"}, - {'name': 'init_ro_file', 'contents': "RO data", 'perm': '-w'}], -} - - -# ================= Utility Functions ================== - - -@pytest.fixture(scope='module') -def ftp_init_db_dflt(): - # Get the 'default' settings from FTPModel - ftpconf_script = '#!/usr/bin/python3\n' - ftpconf_script += 'import json\n' - ftpconf_script += 'from middlewared.plugins.ftp import FTPModel\n' - ftpconf_script += 'FTPModel_defaults = {}\n' - ftpconf_script += 'for attrib in FTPModel.__dict__.keys():\n' - ftpconf_script += ' if attrib[:4] == "ftp_":\n' - ftpconf_script += ' try:\n' - ftpconf_script += ' val = getattr(getattr(FTPModel, attrib), "default").arg\n' - ftpconf_script += ' except AttributeError:\n' - ftpconf_script += ' val = None\n' - ftpconf_script += ' if not callable(val):\n' - ftpconf_script += ' FTPModel_defaults[attrib] = val\n' - ftpconf_script += 'print(json.dumps(FTPModel_defaults))\n' - cmd_file = open('ftpconf.py', 'w') - cmd_file.writelines(ftpconf_script) - cmd_file.close() - results = send_file('ftpconf.py', 'ftpconf.py', user, password, truenas_server.ip) - assert results['result'], str(results['output']) - rv_defaults = SSH_TEST("python3 ftpconf.py", user, password) - assert rv_defaults['result'], str(rv_defaults) - global FTP_DEFAULT - FTP_DEFAULT = json.loads(rv_defaults['stdout'].strip()) - - # clean up the temporary script - os.remove('ftpconf.py') - results = SSH_TEST('rm ftpconf.py', user, password) - assert results['result'] is True, results - - # # Special cases: The default banner is in a file (see proftpd.conf.mako) - assert FTP_DEFAULT['ftp_banner'] is None, FTP_DEFAULT['ftp_banner'] - - # Make the default model keys match the DB names - global DB_DFLT - DB_DFLT = {k.replace('ftp_', ''): FTP_DEFAULT[k] for k in FTP_DEFAULT} - return DB_DFLT - - -def ftp_set_config(config={}): - # Fixup some settings - if config != {}: - tmpconf = config.copy() - if 'banner' in tmpconf and tmpconf['banner'] is None: - tmpconf['banner'] = "" - if 'anonpath' in tmpconf and tmpconf['anonpath'] is False: - tmpconf['anonpath'] = "" - if 'masqaddress' in tmpconf and tmpconf['masqaddress'] is None: - tmpconf['masqaddress'] = '' - if 'ssltls_certificate_id' in tmpconf and tmpconf['ssltls_certificate_id'] is None: - tmpconf.pop('ssltls_certificate_id') - if 'options' in tmpconf and tmpconf['options'] is None: - tmpconf['options'] = '' - call('ftp.update', tmpconf) - - -def parse_conf_file(file='proftpd'): - results = SSH_TEST(f"cat /etc/proftpd/{file}.conf", user, password) - assert results['result'], str(results) - lines = results['stdout'].splitlines() - - rv = {} - context = [{'server': None}] - for line in lines: - line = line.lstrip() - if not line or line.startswith('#'): - continue - - # Keep track of contexts - if line.startswith('<'): - if line[1] == "/": - context.pop() 
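-                # A closing tag pops one nesting level. For a hypothetical config block: -                #   <Anonymous /mnt/tank/ftp> -                #     <Limit LOGIN> -                #       AllowAll -                #     </Limit> -                #   </Anonymous> -                # the stack grows to [{'server': None}, {'Anonymous': '/mnt/tank/ftp'}, {'Limit': 'LOGIN'}] -                # and each closing tag unwinds the innermost entry.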
- continue - else: - c = line.split()[0][1:] - v = line.split()[1][:-1] if len(line.split()) > 1 else None - context.append({c: v}) - continue - - # Process the directive - if 1 < len(line.strip().split()): - # Trap TransferRate directive - if "TransferRate" == line.split()[0]: - tmp = line.split() - directive = ' '.join(tmp[:2]) - value = ' '.join(tmp[2:]) - else: - directive, value = line.strip().split(maxsplit=1) - else: - directive = line.strip() - value = None - entry = {directive: [copy.deepcopy(context), value]} - rv.update(entry) - return rv - - -def query_ftp_service(): - return call('service.query', [['service', '=', 'ftp']], {'get': True}) - - -def validate_proftp_conf(): - ''' - Confirm FTP configuration settings - NB: Avoid calling this for localuser* and anonuser* in the same test - ''' - xlat = {True: "on", False: "off"} - # Retrieve result from the database - ftpConf = call('ftp.config') - parsed = parse_conf_file('proftpd') - - # Sanity spot check settings in proftpd.conf - assert ftpConf['port'] == int(parsed['Port'][1]) - assert ftpConf['clients'] == int(parsed['MaxClients'][1]), f"\nftpConf={ftpConf}\nparsed={parsed}" - assert ftpConf['ipconnections'] == int(parsed['MaxConnectionsPerHost'][1]) - assert ftpConf['loginattempt'] == int(parsed['MaxLoginAttempts'][1]) - assert ftpConf['timeout'] == int(parsed['TimeoutIdle'][1]) - assert ftpConf['timeout_notransfer'] == int(parsed['TimeoutNoTransfer'][1]) - - # Confirm that rootlogin has been removed. - assert ftpConf.get('rootlogin') is None - - if ftpConf['onlyanonymous']: - assert 'User' in parsed - assert ftpConf['anonpath'] == parsed['User'][0][1]['Anonymous'], f"parsed['User'] = {parsed['User']}" - assert parsed['UserAlias'][1] == 'anonymous ftp' - assert parsed['Group'][1] == 'ftp' - assert 'LOGIN' == parsed['AllowAll'][0][2]['Limit'], \ - f"AllowAll must be within <Limit LOGIN>, {parsed['AllowAll']}" - else: - assert parsed['User'][1] == 'nobody' - - if ftpConf['onlylocal']: - assert 'AllowAll' in parsed - assert 'LOGIN' == parsed['AllowAll'][0][1]['Limit'], \ - f"AllowAll must be within <Limit LOGIN>, {parsed['AllowAll']}" - else: - if not ftpConf['onlyanonymous']: - assert 'AllowAll' not in parsed - - # The absence of onlyanonymous and onlylocal means some settings are present - if not (ftpConf['onlyanonymous'] or ftpConf['onlylocal']): - assert 'DenyAll' in parsed - assert 'LOGIN' == parsed['DenyAll'][0][1]['Limit']
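- # parse_conf_file() maps each directive to [context_stack, value]; a hypothetical - # entry for the block checked above would look like: - #   parsed['DenyAll'] == [[{'server': None}, {'Limit': 'LOGIN'}], None] - # so these assertions can verify both where a directive appeared and its value.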
- # Confirm rootlogin has been removed. - assert 'root' not in parsed['AllowGroup'] - - # The banner is saved to a file - rv_motd = SSH_TEST("cat /etc/proftpd/proftpd.motd", user, password) - assert rv_motd['result'], str(rv_motd) - motd = rv_motd['stdout'].strip() - if ftpConf['banner']: - assert motd == ftpConf['banner'], f"\nproftpd.motd = \'{motd}\'\nbanner = \'{ftpConf['banner']}\'" - - expect_umask = f"{ftpConf['filemask']} {ftpConf['dirmask']}" - assert expect_umask == parsed['Umask'][1], \ - f"Found unexpected Umask entry: expected '{expect_umask}', found '{parsed['Umask'][1]}'" - assert xlat[ftpConf['fxp']] == parsed['AllowForeignAddress'][1] - if ftpConf['resume']: - assert xlat[ftpConf['resume']] == parsed['AllowRetrieveRestart'][1] - assert xlat[ftpConf['resume']] == parsed['AllowStoreRestart'][1] - - # The DefaultRoot setting is defined completely in proftpd.conf.mako as '~ !root' - if ftpConf['defaultroot']: - assert parsed['DefaultRoot'][1] == "~ !root" - - assert xlat[ftpConf['ident']] == parsed['IdentLookups'][1] - assert xlat[ftpConf['reversedns']] == parsed['UseReverseDNS'][1] - - if ftpConf['masqaddress']: - assert ftpConf['masqaddress'] == parsed['MasqueradeAddress'][1] - - if ftpConf['passiveportsmin']: - expect_setting = f"{ftpConf['passiveportsmin']} {ftpConf['passiveportsmax']}" - assert expect_setting == parsed['PassivePorts'][1], \ - f"Found unexpected PassivePorts entry: expected '{expect_setting}', found '{parsed['PassivePorts'][1]}'" - - if ftpConf['localuserbw']: - assert ftpConf['localuserbw'] == int(parsed['TransferRate STOR'][1]) - if ftpConf['localuserdlbw']: - assert ftpConf['localuserdlbw'] == int(parsed['TransferRate RETR'][1]) - if ftpConf['anonuserbw']: - assert ftpConf['anonuserbw'] == int(parsed['TransferRate STOR'][1]) - if ftpConf['anonuserdlbw']: - assert ftpConf['anonuserdlbw'] == int(parsed['TransferRate RETR'][1]) - - if ftpConf['tls']: - parsed = parsed | parse_conf_file('tls') - - # These two are 'fixed' settings in proftpd.conf.mako, but they are important - assert parsed['TLSEngine'][1] == 'on' - assert parsed['TLSProtocol'][1] == 'TLSv1.2 TLSv1.3' - - if 'TLSOptions' in parsed: - # Following the same method from proftpd.conf.mako - tls_options = [] - for k, v in [ - ('allow_client_renegotiations', 'AllowClientRenegotiations'), - ('allow_dot_login', 'AllowDotLogin'), - ('allow_per_user', 'AllowPerUser'), - ('common_name_required', 'CommonNameRequired'), - ('enable_diags', 'EnableDiags'), - ('export_cert_data', 'ExportCertData'), - ('no_empty_fragments', 'NoEmptyFragments'), - ('no_session_reuse_required', 'NoSessionReuseRequired'), - ('stdenvvars', 'StdEnvVars'), - ('dns_name_required', 'dNSNameRequired'), - ('ip_address_required', 'iPAddressRequired'), - ]: - if ftpConf[f'tls_opt_{k}']: - tls_options.append(v) - - assert set(tls_options) == set(parsed['TLSOptions'][1].split()), \ - f"--- Unexpected difference ---\ntls_options:\n{set(tls_options)}"\ - f"\nparsed['TLSOptions']\n{set(parsed['TLSOptions'][1].split())}" - assert ftpConf['tls_policy'] == parsed['TLSRequired'][1] - # Do a sanity check on the certificate entries - assert 'TLSRSACertificateFile' in parsed - assert 'TLSRSACertificateKeyFile' in parsed - # Return the current welcome message - return ftpConf, motd
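-# Typical usage of these two helpers together (a sketch; the option values are -# assumed, not taken from any particular test): -# -#   with ftp_configure({'clients': 100, 'fxp': True}): -#       ftpConf, motd = validate_proftp_conf() -#       # ... exercise the server with the temporary settings ... -# -# ftp_configure() below restores the prior DB values on exit.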
- - -@contextlib.contextmanager -def ftp_configure(changes=None): - ''' - Apply requested FTP configuration changes. - Restore original setting when done - ''' - changes = changes or {} - ftpConf = call('ftp.config') - restore_keys = set(ftpConf) & set(changes) - restore_items = {key: ftpConf[key] for key in restore_keys} - if changes: - try: - call('ftp.update', changes) - yield - finally: - # Restore settings - call('ftp.update', restore_items) - # Validate the restore - validate_proftp_conf() - - -def ftp_set_service_enable_state(state=None): - ''' - Get and return the current state struct - Set the requested state - ''' - restore_setting = None - if state is not None: - assert isinstance(state, bool) - # save current setting - restore_setting = query_ftp_service()['enable'] - # update to requested setting - call('service.update', 'ftp', {'enable': state}) - - return restore_setting - - -@contextlib.contextmanager -def ftp_server(service_state=None): - ''' - Start FTP server with current config - Stop server when done - ''' - # service 'enable' state - if service_state is not None: - restore_state = ftp_set_service_enable_state(service_state) - - try: - # Start FTP service - call('service.start', 'ftp', {'silent': False}) - yield - finally: - # proftpd can core dump if stopped while it's busy - # processing a prior config change. Give it a sec. - sleep(1) - call('service.stop', 'ftp', {'silent': False}) - # Restore original service state - if service_state is not None: - ftp_set_service_enable_state(restore_state) - - -@contextlib.contextmanager -def ftp_anon_ds_and_srvr_conn(dsname='ftpdata', FTPconfig=None, useFTPS=None, withConn=None, **kwargs): - FTPconfig = FTPconfig or {} - withConn = True if withConn is None else withConn - - with dataset_asset(dsname, **kwargs) as ds: - ds_path = f"/mnt/{ds}" - - # Add files and dirs - ftp_dirs_and_files = INIT_DIRS_AND_FILES.copy() - ftp_dirs_and_files['path'] = ds_path - ftp_init_dirs_and_files(ftp_dirs_and_files) - - with ftp_server(): - anon_config = { - "onlyanonymous": True, - "anonpath": ds_path, - "onlylocal": False, - **FTPconfig - } - with ftp_configure(anon_config): - ftpConf, motd = validate_proftp_conf() - if withConn: - with (ftps_connection if useFTPS else ftp_connection)(truenas_server.ip) as ftp: - yield SimpleNamespace(ftp=ftp, dirs_and_files=ftp_dirs_and_files, - ftpConf=ftpConf, motd=motd) - - -@contextlib.contextmanager -def ftp_user_ds_and_srvr_conn(dsname='ftpdata', username="FTPlocal", FTPconfig=None, useFTPS=False, **kwargs): - FTPconfig = FTPconfig or {} - - with dataset_asset(dsname, **kwargs) as ds: - ds_path = f"/mnt/{ds}" - with ftp_user({ - "username": username, - "group_create": True, - "home": ds_path, - "full_name": username + " User", - "password": "secret", - "home_create": False, - "smb": False, - "groups": [call('group.query', [['name', '=', 'ftp']], {'get': True})['id']], - }): - # Add dirs and files - ftp_dirs_and_files = INIT_DIRS_AND_FILES.copy() - ftp_dirs_and_files['path'] = ds_path - ftp_init_dirs_and_files(ftp_dirs_and_files) - - with ftp_server(): - with ftp_configure(FTPconfig): - ftpConf, motd = validate_proftp_conf() - with (ftps_connection if useFTPS else ftp_connection)(truenas_server.ip) as ftp: - yield SimpleNamespace(ftp=ftp, dirs_and_files=ftp_dirs_and_files, ftpConf=ftpConf, motd=motd) - - -def ftp_get_users(): - ''' - Return a list of active users - NB: ftp service should be running when called - ''' - ssh_out = SSH_TEST("ftpwho -o json", user, password) - assert ssh_out['result'], str(ssh_out) - output = ssh_out['output'] - # Strip off trailing bogus data - joutput = output[:output.rindex('}') + 1] - whodata = json.loads(joutput) - return whodata['connections']
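-# The two partial-transfer helpers below intentionally abandon the data -# connection part-way through so the resume tests can finish the job later. -# A minimal sketch of the resume arithmetic (file names assumed): -# -#   already_there = ftp.size('tgtfile')          # bytes already on the server -#   with open('srcfile', 'rb') as f: -#       f.seek(already_there)                    # skip what made it across -#       ftp.storbinary('STOR tgtfile', f, rest=already_there)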
- - -# For resume xfer test -def upload_partial(ftp, src, tgt, NumKiB=128): - with open(src, 'rb') as file: - ftp.voidcmd('TYPE I') - with ftp.transfercmd(f'STOR {os.path.basename(tgt)}', None) as conn: - blksize = NumKiB // 8 - for xfer in range(0, 8): - # Send some of the file - buf = file.read(1024 * blksize) - assert buf, "Unexpected local read error" - conn.sendall(buf) - - -def download_partial(ftp, src, tgt, NumKiB=128): - with open(tgt, 'wb') as file: - ftp.voidcmd('TYPE I') - with ftp.transfercmd(f'RETR {os.path.basename(src)}', None) as conn: - NumXfers = NumKiB // 8 - for xfer in range(0, NumXfers): - # Receive and write some of the file - data = conn.recv(8192) - assert data, "Unexpected receive error" - file.write(data) - - -def ftp_upload_binary_file(ftpObj, source, target, offset=None): - """ - Upload a file to the FTP server - INPUT: - source is the full-path to local file - target is the name to use on the FTP server - RETURN: - Elapsed time to upload file - - """ - assert ftpObj is not None - assert source is not None - assert target is not None - - with open(source, 'rb') as fp: - if offset: - fp.seek(offset) - start = timer() - ftpObj.storbinary(f'STOR {os.path.basename(target)}', fp, rest=offset) - et = timer() - start - return et - - -def ftp_download_binary_file(ftpObj, source, target, offset=None): - """ - Download a file from the FTP server - INPUT: - source is the name of the file on the FTP server - target is full-path name on local host - RETURN: - Elapsed time to download file - """ - assert ftpObj is not None - assert source is not None - assert target is not None - opentype = 'ab' if offset else 'wb' - - with open(target, opentype) as fp: - start = timer() - ftpObj.retrbinary(f'RETR {os.path.basename(source)}', fp.write, rest=offset) - et = timer() - start - return et - - -def ftp_create_local_file(LocalPathName="", content=None): - ''' - Create a local file - INPUT: - If 'content' is: - - None, then create with touch - - 'int', then it represents the size in KiB to fill with random data - - 'str', then write that to the file - If 'content' is not None, 'int' or 'str', then assert - RETURN: - tuple: (size_in_bytes, sha256_checksum) - ''' - assert LocalPathName != "", "empty file name" - b = 'b' if isinstance(content, int) else '' - # Create a local file - with open(LocalPathName, 'w' + b) as f: - if (content is None) or isinstance(content, str): - content = content or "" - f.write(content) - elif isinstance(content, int): - f.write(os.urandom(1024 * content)) - else: - assert False, f"Cannot create with content: '{content}'" - # Confirm existence - assert os.path.exists(LocalPathName) - localsize = os.path.getsize(LocalPathName) - - res = subprocess.run(["sha256sum", LocalPathName], capture_output=True) - local_chksum = res.stdout.decode().split()[0] - return (localsize, local_chksum)
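-# Both create-file helpers return (size, sha256) so a transfer can be verified -# end to end. Sketch with assumed names and paths: -# -#   src_size, src_sum = ftp_create_local_file('/tmp/src', 64)   # 64 KiB random -#   # ... upload /tmp/src, then hash the server-side copy ... -#   tgt_sum = ssh('sha256sum /path/on/server').split()[0] -#   assert tgt_sum == src_sum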
- - -def ftp_create_remote_file(RemotePathName="", content=None): - ''' - Create a remote file - INPUT: - If 'content' is: - - None, then create with touch - - 'int', then it represents the size in KiB to fill with random data - - 'str', then write that to the file - If 'content' is not None, 'int' or 'str', then assert - RETURN: - tuple: (size_in_bytes, sha256_checksum) - ''' - assert RemotePathName != "", "empty file name" - if content is None: - ssh(f'touch {RemotePathName}') - elif isinstance(content, int): - ssh(f"dd if=/dev/urandom of={RemotePathName} bs=1K count={content}", complete_response=True) - elif isinstance(content, str): - ssh(f'echo "{content}" > {RemotePathName}') - else: - assert False, f"Cannot create with content: '{content}'" - - # Get and return the details - remotesize = ssh(f"du -b {RemotePathName}").split()[0] - remote_chksum = ssh(f"sha256sum {RemotePathName}").split()[0] - return (remotesize, remote_chksum) - - -def ftp_init_dirs_and_files(items=None): - if items is not None: - assert items['path'] is not None - path = items['path'] - for d in items['dirs']: - res = SSH_TEST(f"mkdir -p {path}/{d['name']}", user, password) - assert res['result'], str(res) - thispath = f"{path}/{d['name']}" - if 'contents' in d: - for f in d['contents']: - res = SSH_TEST(f"touch {thispath}/{f}", user, password) - assert res['result'], str(res) - if 'perm' in d: - res = SSH_TEST(f"chmod {d['perm']} {thispath}", user, password) - assert res['result'], str(res) - - for f in items['files']: - res = SSH_TEST(f"echo \'{f['contents']}\' > \'{path}/{f['name']}\'", user, password) - assert res['result'], str(res) - if 'perm' in f: - res = SSH_TEST(f"chmod {f['perm']} {path}/{f['name']}", user, password) - assert res['result'], str(res) - - -def init_test_data(type='unknown', data=None): - assert data is not None - new_test_data = {} - new_test_data['type'] = type - new_test_data['ftp'] = data.ftp - new_test_data['ftpConf'] = data.ftpConf - new_test_data['motd'] = data.motd - new_test_data['dirs_and_files'] = data.dirs_and_files - return new_test_data - - -def ftp_ipconnections_test(test_data=None, *extra): - ''' - Test FTP MaxConnectionsPerHost conf setting. - The DB equivalent is ipconnections. - NB1: This is called with an existing connection - ''' - assert test_data['ftp'] is not None - ftpConf = test_data['ftpConf'] - ConnectionLimit = int(ftpConf['ipconnections']) - # We already have one connection - NumConnects = 1 - NewConnects = [] - while NumConnects < ConnectionLimit: - try: - ftpConn = ftp_connect(truenas_server.ip) - except all_errors as e: - assert False, f"Unexpected connection error: {e}" - NewConnects.append(ftpConn) - NumConnects += 1 - CurrentFtpUsers = ftp_get_users() - assert len(CurrentFtpUsers) == ConnectionLimit - try: - # This next connect should fail - ftp_connect(truenas_server.ip) - except all_errors as e: - # An expected error - assert NumConnects == ConnectionLimit - assert e.args[0].startswith('530') - assert f"maximum number of connections ({ConnectionLimit})" in e.args[0] - finally: - # Clean up extra connections - for conn in NewConnects: - conn.quit() - - -def ftp_dir_listing_test(test_data=None, *extra): - ''' - Get a directory listing - ''' - - assert test_data is not None - ftp = test_data['ftp'] - listing = [name for name, facts in list(ftp.mlsd())] - expected = test_data['dirs_and_files'] - # Confirm the expected entries are present - for f in expected['files']: - assert f['name'] in listing, f"Did not find {f['name']}" - for d in expected['dirs']: - assert d['name'] in listing, f"Did not find {d['name']}"
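-# ftp.mlsd() (the RFC 3659 MLSD command) yields (name, facts) pairs, where -# facts is a dict shaped roughly like {'type': 'file', 'size': '22', ...} -# (values assumed); the listing test above only needs the names.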
- - -def ftp_download_files_test(test_data=None, run_data=None): - ''' - Retrieve files from server and confirm contents - ''' - - assert test_data is not None - ftp = test_data['ftp'] - expected_contents = None - for f in run_data: - if f['contents'] is None: - continue - expected_contents = f['contents'] - found_contents = [] - cmd = f"RETR {f['name']}" - try: - res = ftp.retrlines(cmd, found_contents.append) - assert f['expect_to_pass'] is True, \ - f"Expected file download failure for {f['name']}, but passed: {f}" - assert res.startswith('226 Transfer complete'), "Detected download failure" - assert expected_contents in found_contents - except all_errors as e: - assert f['expect_to_pass'] is False, \ - f"Expected file download success for {f['name']}, but failed: {e.args}" - - -def ftp_upload_files_test(test_data=None, run_data=None): - ''' - Upload files to the server - ''' - localfile = "/tmp/ftpfile" - - assert test_data is not None - assert run_data != [] - ftp = test_data['ftp'] - try: - for f in run_data: - if 'content' in f and isinstance(f['content'], str): - ftp_create_local_file(localfile, f['content']) - with open(localfile, 'rb') as tmpfile: - try: - cmd = f"STOR {f['name']}" - res = ftp.storlines(cmd, tmpfile) - assert f['expect_to_pass'] is True, \ - f"Expected file add failure for {f['name']}, but passed: {f}" - assert res.startswith('226 Transfer complete'), "Detected upload failure" - except all_errors as e: - assert f['expect_to_pass'] is False, \ - f"Expected file add success for {f['name']}, but failed: {e.args}" - finally: - # Clean up - if os.path.exists(localfile): - os.remove(localfile) - - -def ftp_delete_files_test(test_data=None, run_data=None): - ''' - Delete files on the server - ''' - assert test_data is not None - assert run_data != [] - ftp = test_data['ftp'] - for f in run_data: - try: - ftp.delete(f['name']) - assert f['expect_to_pass'] is True, \ - f"Expected file delete failure for {f['name']}, but passed: {f}" - except all_errors as e: - assert f['expect_to_pass'] is False, \ - f"Expected file delete success for {f['name']}, but failed: {e.args}" - - -def ftp_add_dirs_test(test_data=None, run_data=None): - ''' - Create directories on the server - ''' - assert test_data is not None - assert run_data != [] - ftp = test_data['ftp'] - for d in run_data: - try: - res = ftp.mkd(d['name']) - assert d['name'] in res - except all_errors as e: - assert d['expect_to_pass'] is False, \ - f"Expected creation success for {d['name']}, but failed: {e.args}" - - -def ftp_remove_dirs_test(test_data=None, run_data=None): - ''' - Delete directories on the server - ''' - assert test_data is not None - assert run_data != [] - ftp = test_data['ftp'] - for d in run_data: - try: - ftp.rmd(d['name']) - assert d['expect_to_pass'] is True, \ - f"Expected deletion failure for {d['name']}, but passed: {d}" - except all_errors as e: - assert d['expect_to_pass'] is False, \ - f"Expected deletion success for {d['name']}, but failed: {e.args}" - -# -# ================== TESTS ========================= -# - - -@pytest.mark.dependency(name='init_dflt_config') -def test_001_validate_default_configuration(request, ftp_init_db_dflt): - ''' - Confirm the 'default' settings in the DB are in sync with what - is specified in the FTPModel class. These can get out of sync - with migration code.
- NB1: This expects FTP to be in the default configuration - ''' - ftp_set_config(DB_DFLT) - - with ftp_server(): - # Get the DB settings - db = call('ftp.config') - - # Check each setting - diffs = {} - for setting in set(DB_DFLT) & set(db): - # Special cases: ftp_anonpath is 'nullable' in the DB, but the default is False - if setting == "anonpath" and (db[setting] == '' or db[setting] is None): - db[setting] = False - # Special cases: Restore 'None' for empty string - if setting in ['banner', 'options', 'masqaddress'] and db[setting] == '': - db[setting] = None - - if DB_DFLT[setting] != db[setting]: - diffs.update({setting: [DB_DFLT[setting], db[setting]]}) - - assert len(diffs) == 0, f"Found mismatches: [DB_DFLT, db]\n{diffs}" - - -def test_005_ftp_service_at_boot(request): - ''' - Confirm we can enable FTP service at boot and restore current setting - ''' - # Get the current state and set the new state - restore_setting = ftp_set_service_enable_state(True) - assert restore_setting is False, f"Unexpected service at boot setting: enable={restore_setting}, expected False" - - # Confirm we toggled the setting - res = query_ftp_service()['enable'] - assert res is True, res - - # Restore original setting - ftp_set_service_enable_state(restore_setting) - - -def test_010_ftp_service_start(request): - ''' - Confirm we can start the FTP service with the default config - Confirm the proftpd.conf file was generated - ''' - # Start FTP service - with ftp_server(): - # Validate the service is running via our API - assert query_ftp_service()['state'] == 'RUNNING' - - # Confirm we have /etc/proftpd/proftpd.conf - rv_conf = SSH_TEST("ls /etc/proftpd/proftpd.conf", user, password) - assert rv_conf['result'], str(rv_conf) - - -def test_015_ftp_configuration(request): - ''' - Confirm config changes get reflected in proftpd.conf - ''' - depends(request, ["init_dflt_config"], scope="session") - - with ftp_server(): - changes = { - 'clients': 100, - 'ipconnections': 10, - 'loginattempt': 100, - 'banner': 'A banner to remember', - 'onlylocal': True, - 'fxp': True - } - with ftp_configure(changes): - validate_proftp_conf() - - -def test_017_ftp_port(request): - ''' - Confirm the FTP service listens on the configured port and that a port change takes effect - ''' - depends(request, ["init_dflt_config"], scope="session") - - with ftp_server(): - assert query_ftp_service()['state'] == 'RUNNING' - - # Confirm FTP is listening on the default port - res = SSH_TEST("ss -tlpn", user, password) - sslist = res['output'].splitlines() - ftp_entry = [line for line in sslist if "ftp" in line] - ftpPort = ftp_entry[0].split()[3][2:] - assert ftpPort == "21", f"Expected default FTP port, but found {ftpPort}" - - # Test port change - changes = {'port': 22222} - with ftp_configure(changes): - validate_proftp_conf() - res = SSH_TEST("ss -tlpn", user, password) - sslist = res['output'].splitlines() - ftp_entry = [line for line in sslist if "ftp" in line] - ftpPort = ftp_entry[0].split()[3][2:] - assert ftpPort == "22222", f"Expected '22222' FTP port, but found {ftpPort}" - - -@pytest.mark.parametrize('NumFailedTries,expect_to_pass', [ - (2, True), - (3, False) -]) -def test_020_login_attempts(request, NumFailedTries, expect_to_pass): - ''' - Test our ability to change and trap excessive failed login attempts - 1) Test good password before running out of tries - 2) Test good password after running out of tries - ''' - depends(request, ["init_dflt_config"], scope="session") - login_setup = { - "onlylocal": True,
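-        # loginattempt is rendered as MaxLoginAttempts in proftpd.conf (checked in -        # validate_proftp_conf); once it is exceeded proftpd drops the control -        # connection, which is why EOFError is tolerated in the loop below.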
- "loginattempt": 3, - } - with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPfatfingeruser', login_setup) as loginftp: - MaxTries = loginftp.ftpConf['loginattempt'] - ftpObj = loginftp.ftp - for login_attempt in range(0, NumFailedTries): - try: - # Attempt login with bad password - ftpObj.login(user='FTPfatfingeruser', passwd="secrfet") - except all_errors as all_e: - assert True, f"Unexpected login failure: {all_e}" - except EOFError as eof_e: - assert True, f"Unexpected disconnect: {eof_e}" - if expect_to_pass: - # Try with correct password - ftpObj.login(user='FTPfatfingeruser', passwd="secret") - assert expect_to_pass is True - else: - with pytest.raises(Exception): - # Try with correct password, but already exceeded number of tries - ftpObj.login(user='FTPfatfingeruser', passwd="secret") - assert login_attempt < MaxTries, "Failed to limit login attempts" - - -def test_030_root_login(request): - ''' - "Allow Root Login" setting has been removed. - Confirm we block root login. - ''' - depends(request, ["init_dflt_config"], scope="session") - with ftp_anon_ds_and_srvr_conn('anonftpDS') as ftpdata: - ftpObj = ftpdata.ftp - try: - res = ftpObj.login(user, password) - assert True, f"Unexpected behavior: root login was supposed to fail, but login response is {res}" - - except all_errors: - pass - - -@pytest.mark.parametrize('setting,ftpConfig', [ - (True, {"onlyanonymous": True, "anonpath": "anonftpDS", "onlylocal": False}), - (False, {"onlyanonymous": False, "anonpath": "", "onlylocal": True}), -]) -def test_031_anon_login(request, setting, ftpConfig): - ''' - Test the WebUI "Allow Anonymous Login" setting. - In our DB the setting is "onlyanonymous" and an "Anonymous" section in proftpd.conf. - ''' - depends(request, ["init_dflt_config"], scope="session") - if setting is True: - # Fixup anonpath - ftpConfig['anonpath'] = f"/mnt/{pool_name}/{ftpConfig['anonpath']}" - with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConfig) as ftpdata: - ftpObj = ftpdata.ftp - try: - res = ftpObj.login() - assert setting is True, \ - f"Unexpected behavior: onlyanonymous={ftpConfig['onlyanonymous']}, but login successfull: {res}" - - # The following assumes the login was successfull - assert res.startswith('230') - ftpusers = ftp_get_users() - assert 'ftp' == ftpusers[0]['user'] - except all_errors as e: - assert setting is False, f"Unexpected failure, onlyanonymous={setting}, but got {e}" - - -@pytest.mark.parametrize('localuser,expect_to_pass', [ - ("FTPlocaluser", True), - ("BadUser", False) -]) -def test_032_local_login(request, localuser, expect_to_pass): - depends(request, ["init_dflt_config"], scope="session") - with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', {"onlylocal": True}) as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login(localuser, 'secret') - assert expect_to_pass, f"Unexpected behavior: {user} should not have been allowed to login" - except all_errors as e: - assert not expect_to_pass, f"Unexpected behavior: {user} should have been allowed to login. {e}" - - -def test_040_reverse_dns(request): - depends(request, ["init_dflt_config"], scope="session") - ftp_conf = {"onlylocal": True, "reversedns": True} - with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', ftp_conf) as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login('FTPlocaluser', 'secret') - except all_errors as e: - assert False, f"Login failed with reverse DNS enabled. 
{e}" - - -@pytest.mark.parametrize('masq_type, expect_to_pass', - [("hostname", True), ("ip_addr", True), ("invalid.domain", False)]) -def test_045_masquerade_address(request, masq_type, expect_to_pass): - ''' - TrueNAS tooltip: - Public IP address or hostname. Set if FTP clients cannot connect through a NAT device. - We test masqaddress with: hostname, IP address and an invalid fqdn. - ''' - depends(request, ["init_dflt_config"], scope="session") - netconfig = call('network.configuration.config') - if masq_type == 'hostname': - masqaddr = netconfig['hostname'] - if netconfig['domain'] and netconfig['domain'] != "local": - masqaddr = masqaddr + "." + netconfig['domain'] - elif masq_type == 'ip_addr': - masqaddr = truenas_server.ip - else: - masqaddr = masq_type - - ftp_conf = {"onlylocal": True, "masqaddress": masqaddr} - with pytest.raises(Exception) if not expect_to_pass else contextlib.nullcontext(): - with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', ftp_conf) as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login('FTPlocaluser', 'secret') - res = ftpObj.sendcmd('PASV') - assert res.startswith("227 Entering Passive Mode") - srvr_ip, p1, p2 = res.split('(', 1)[1].split(')')[0].rsplit(',', 2) - srvr_ip = srvr_ip.replace(',', '.') - # If the masquerade is our hostname the presented IP address will - # be the 'local' IP address - if masq_type == "hostname": - assert srvr_ip == '127.0.0.1' - else: - assert srvr_ip == truenas_server.ip - except all_errors as e: - assert False, f"FTP failed with masqaddres = '{masqaddr}'. {e}" - - -@pytest.mark.parametrize('testing,ftpConfig,expect_to_pass', [ - ("config", {"passiveportsmin": 100}, False), - ("config", {"passiveportsmin": 3000, "passiveportsmax": 2000}, False), - ("config", {"passiveportsmin": 2000, "passiveportsmax": 2000}, False), - ("run", {"passiveportsmin": 22222, "passiveportsmax": 22223}, True), -]) -def test_050_passive_ports(request, testing, ftpConfig, expect_to_pass): - ''' - Test the passive port range setting. - NB: The proFTPd documentation for this setting states: - | Should no open ports be found within the configured range, the server will default - | to a random kernel-assigned port, and a message logged. - ''' - depends(request, ["init_dflt_config"], scope="session") - if testing == 'config': - try: - with ftp_configure(ftpConfig): - assert expect_to_pass is True - except Exception as e: - assert expect_to_pass is False, f"{e['error']}" - else: - with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConfig) as ftpdata: - ftpObj = ftpdata.ftp - try: - res = ftpObj.login() - # The confirm the login was successfull - assert res.startswith('230') - res = ftpObj.sendcmd('PASV') - assert res.startswith("227 Entering Passive Mode") - # The response includes the server IP and passive port - # Convert '227 Entering Passive Mode (a,b,c,d,e,f)' to ['a,b,c,d', 'e', 'f'] - srvr_ip, p1, p2 = res.split('(', 1)[1].split(')')[0].rsplit(',', 2) - # Calculate the passive port - pasv_port = int(p1) * 256 + int(p2) - assert srvr_ip.replace(',', '.') == truenas_server.ip - assert pasv_port == ftpdata.ftpConf['passiveportsmin'] - except all_errors as e: - assert expect_to_pass is False, f"Unexpected failure, {e}" - - -def test_055_no_activity_timeout(request): - ''' - Test the WebUI "Timeout" setting. In our DB it is "timeout" and "TimeoutIdle" in proftpd.conf. - | The TimeoutIdle directive configures the maximum number of seconds that proftpd will - ! 
- - -def test_055_no_activity_timeout(request): - ''' - Test the WebUI "Timeout" setting. In our DB it is "timeout" and "TimeoutIdle" in proftpd.conf. - | The TimeoutIdle directive configures the maximum number of seconds that proftpd will - | allow clients to stay connected without receiving any data on either the control or data connection - ''' - depends(request, ["init_dflt_config"], scope="session") - with ftp_anon_ds_and_srvr_conn('anonftpDS', {'timeout': 3}) as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login() - sleep(ftpdata.ftpConf['timeout'] + 1) - ftpObj.nlst() - assert False, "Unexpected behavior: 'Activity Timeout' did not occur. "\ - "Expected listing to fail, but it succeeded." - except all_errors as e: - chkstr = f"Idle timeout ({ftpdata.ftpConf['timeout']} seconds)" - assert chkstr in str(e), e - - -def test_056_no_xfer_timeout(request): - ''' - This tests the WebUI "Notransfer Timeout" setting. In our DB it is "timeout_notransfer" - and "TimeoutNoTransfer" in proftpd.conf. - | The TimeoutNoTransfer directive configures the maximum number of seconds a client - | is allowed to spend connected, after authentication, without issuing a data transfer command - | which results in a data connection (i.e. sending/receiving a file, or requesting a directory listing) - ''' - depends(request, ["init_dflt_config"], scope="session") - with ftp_anon_ds_and_srvr_conn('anonftpDS', {'timeout_notransfer': 3}) as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login() - sleep(ftpdata.ftpConf['timeout_notransfer'] + 1) - ftpObj.nlst() - assert False, "Unexpected behavior: 'No Transfer Timeout' did not occur. "\ - "Expected listing to fail, but it succeeded." - except all_errors as e: - chkstr = f"No transfer timeout ({ftpdata.ftpConf['timeout_notransfer']} seconds)" - assert chkstr in str(e), e - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) # Can sometimes get outside the range -@pytest.mark.parametrize('testwho,ftp_setup_func', [ - ('anon', ftp_anon_ds_and_srvr_conn), - ('local', ftp_user_ds_and_srvr_conn), -]) -def test_060_bandwidth_limiter(request, testwho, ftp_setup_func): - FileSize = 1024 # KiB - ulRate = 64 # KiB - dlRate = 128 # KiB - ulConf = testwho + 'userbw' - dlConf = testwho + 'userdlbw' - - depends(request, ["init_dflt_config"], scope="session") - ftp_anon_bw_limit = { - ulConf: ulRate, # upload limit - dlConf: dlRate # download limit - } - ftpfname = "BinaryFile" - - with ftp_setup_func(FTPconfig=ftp_anon_bw_limit) as ftpdata: - ftpObj = ftpdata.ftp - localfname = f"/tmp/{ftpfname}" - if testwho == 'anon': - results = SSH_TEST(f"chown ftp {ftpdata.ftpConf['anonpath']}", user, password) - assert results['result'] is True, results - try: - if testwho == 'anon': - ftpObj.login() - else: - ftpObj.login('FTPlocal', 'secret') - ftpObj.voidcmd('TYPE I') - - # Create local binary file - with open(localfname, 'wb') as f: - f.write(os.urandom(1024 * FileSize)) - - ElapsedTime = int(ftp_upload_binary_file(ftpObj, localfname, ftpfname)) - xfer_rate = FileSize // ElapsedTime - # This typically will match exactly, but in actual testing this might vary - assert (ulRate - 8) <= xfer_rate <= (ulRate + 20), \ - f"Failed upload rate limiter: Expected {ulRate}, but sensed rate is {xfer_rate}" - - ElapsedTime = int(ftp_download_binary_file(ftpObj, ftpfname, localfname)) - xfer_rate = FileSize // ElapsedTime - # Allow for variance - assert (dlRate - 8) <= xfer_rate <= (dlRate + 20), \ - f"Failed download rate limiter: Expected {dlRate}, but sensed rate is {xfer_rate}" - except all_errors as e: - assert False, f"Unexpected failure: {e}" - finally: - # Clean up - if os.path.exists(localfname): - os.remove(localfname)
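-# The umask cases below follow standard POSIX mode arithmetic (sketch): -#   new file mode      = 0o666 & ~fmask   ->  fmask 007 gives 0o660 -#   new directory mode = 0o777 & ~dmask   ->  dmask 002 gives 0o775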
"002", "0775"), -]) -def test_065_umask(request, fmask, f_expect, dmask, d_expect): - depends(request, ["init_dflt_config"], scope="session") - localfile = "/tmp/localfile" - fname = "filemask" + fmask - dname = "dirmask" + dmask - ftp_create_local_file(localfile, "Contents of local file") - - ftp_umask = { - 'filemask': fmask, - 'dirmask': dmask - } - with ftp_anon_ds_and_srvr_conn('anonftpDS', ftp_umask, mode='777') as ftpdata: - ftpObj = ftpdata.ftp - try: - ftpObj.login() - - # Add file and make a directory - with open(localfile, 'rb') as tmpfile: - res = ftpObj.storlines(f'STOR {fname}', tmpfile) - assert "Transfer complete" in res - - res = ftpObj.mkd(dname) - assert dname in res - - ftpdict = dict(ftpObj.mlsd()) - assert ftpdict[fname]['unix.mode'] == f_expect, ftpdict[fname] - assert ftpdict[dname]['unix.mode'] == d_expect, ftpdict[dname] - - except all_errors as e: - assert False, f"Unexpected failure: {e}" - finally: - # Clean up - if os.path.exists(localfile): - os.remove(localfile) - - -@pytest.mark.dependency(depends=['init_dflt_config']) -@pytest.mark.parametrize( - 'ftpConf,expect_to_pass', [ - ({}, False), - ({'resume': True}, True) - ], - ids=[ - "resume xfer: blocked", - "resume xfer: allowed" - ] -) -@pytest.mark.parametrize( - 'direction,create_src,xfer_partial,xfer_remainder', [ - ('upload', ftp_create_local_file, upload_partial, ftp_upload_binary_file), - ('download', ftp_create_remote_file, download_partial, ftp_download_binary_file) - ], - ids=[ - "upload", - "download" - ] -) -def test_070_resume_xfer( - ftpConf, expect_to_pass, direction, create_src, xfer_partial, xfer_remainder -): - - # # ---------- helper functions --------- - def get_tgt_size(ftp, tgt, direction): - if direction == 'upload': - ftp.voidcmd('TYPE I') - return ftp.size(os.path.basename(tgt)) - else: - return os.path.getsize(tgt) - - def get_tgt_chksum(tgt, direction): - if direction == 'upload': - return ssh(f"sha256sum {tgt}").split()[0] - else: - res = subprocess.run(["sha256sum", tgt], capture_output=True) - assert res.returncode == 0 - return res.stdout.decode().split()[0] - - try: - # Run test - with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConf, withConn=False, mode='777') as ftpdata: - src_path = {'upload': "/tmp", 'download': f"{ftpdata.ftpConf['anonpath']}"} - tgt_path = {'upload': f"{ftpdata.ftpConf['anonpath']}", "download": "/tmp"} - - # xfer test - try: - # Create a 1MB source binary file. - src_pathname = '/'.join([src_path[direction], 'srcfile']) - tgt_pathname = '/'.join([tgt_path[direction], 'tgtfile']) - src_size, src_chksum = create_src(src_pathname, 1024) - - ftpObj = ftp_connect(truenas_server.ip) - ftpObj.login() - xfer_partial(ftpObj, src_pathname, tgt_pathname, 768) - - # Quit to simulate loss of connection - try: - ftpObj.quit() - except error_temp: - # May generate a quit error that we ignore for this test - pass - ftpObj = None - sleep(1) - - # Attempt resume to complete the upload - ftpObj = ftp_connect(truenas_server.ip) - ftpObj.login() - xfer_remainder(ftpObj, src_pathname, tgt_pathname, get_tgt_size(ftpObj, tgt_pathname, direction)) - except all_errors as e: - assert not expect_to_pass, f"Unexpected failure in resumed {direction} test: {e}" - if not expect_to_pass: - assert "Restart not permitted" in str(e), str(e) - - if expect_to_pass: - # Check upload result - tgt_size = get_tgt_size(ftpObj, tgt_pathname, direction) - assert int(tgt_size) == int(src_size), \ - f"Failed {direction} size test. 
Expected {src_size}, found {tgt_size}" - tgt_chksum = get_tgt_chksum(tgt_pathname, direction) - assert src_chksum == tgt_chksum, \ - f"Failed {direction} checksum test. Expected {src_chksum}, found {tgt_chksum}" - - finally: - # Remove each file separately so a missing file does not skip the rest - for file in ['/tmp/srcfile', '/tmp/tgtfile']: - try: - os.remove(file) - except OSError: - pass - - -class UserTests: - """ - Run the same suite of tests for all users - """ - ftp_user_tests = [ - (ftp_dir_listing_test, []), - (ftp_ipconnections_test, []), - (ftp_download_files_test, [ - {'name': 'init_file', 'contents': "Contents of init_file", 'expect_to_pass': True}, - {'name': 'init_ro_file', 'contents': "RO data", 'expect_to_pass': True}, - ]), - (ftp_upload_files_test, [ - {'name': 'DeleteMeFile', 'content': 'To be deleted', 'expect_to_pass': True}, - {'name': 'init_ro_file', 'expect_to_pass': False}, - ]), - (ftp_delete_files_test, [ - {'name': 'DeleteMeFile', 'expect_to_pass': True}, - {'name': 'bogus_file', 'expect_to_pass': False}, - {'name': 'init_ro_dir/ReadOnlyDir_file1', 'expect_to_pass': False}, - ]), - (ftp_add_dirs_test, [ - {'name': 'DeleteMeDir', 'expect_to_pass': True}, - ]), - (ftp_remove_dirs_test, [ - {'name': 'DeleteMeDir', 'expect_to_pass': True}, - {'name': 'bogus_dir', 'expect_to_pass': False}, - {'name': 'init_ro_dir', 'expect_to_pass': False}, - ]) - ] - - @pytest.mark.parametrize("user_test,run_data", ftp_user_tests) - def test_080_ftp_user(self, setup, user_test, run_data): - try: - user_test(setup, run_data) - except all_errors as e: - assert e is None, f"FTP error: {e}" - - -class TestAnonUser(UserTests): - """ - Create a dataset with some data to be used for anonymous FTP - Start FTP server configured for anonymous - Create an anonymous FTP connection and login - """ - @pytest.fixture(scope='class') - def setup(self, request): - depends(request, ["init_dflt_config"], scope="session") - - with ftp_anon_ds_and_srvr_conn('anonftpDS') as anonftp: - # Make the directory owned by the anonymous ftp user - anon_path = anonftp.dirs_and_files['path'] - results = SSH_TEST(f"chown ftp {anon_path}", user, password) - assert results['result'] is True, results - login_error = None - ftpObj = anonftp.ftp - try: - res = ftpObj.login() - assert res.startswith('230 Anonymous access granted') - # anonymous clients should not get the welcome message - assert anonftp.motd.splitlines()[0] not in res - - # Run anonymous user tests with updated data - yield init_test_data('Anon', anonftp) - except all_errors as e: - login_error = e - assert login_error is None - - -class TestLocalUser(UserTests): - - @pytest.fixture(scope='class') - def setup(self, request): - depends(request, ["init_dflt_config"], scope="session") - - local_setup = { - "onlylocal": True, - } - with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', local_setup) as localftp: - login_error = None - ftpObj = localftp.ftp - try: - res = ftpObj.login(user='FTPlocaluser', passwd="secret") - assert res.startswith('230') - # local users should get the welcome message - assert localftp.motd.splitlines()[0] in res - ftpusers = ftp_get_users() - assert "FTPlocaluser" == ftpusers[0]['user'] - - # Run the user tests with updated data - yield init_test_data('Local', localftp) - except all_errors as e: - login_error = e - assert login_error is None - - -class TestFTPSUser(UserTests): - - @pytest.fixture(scope='class') - def setup(self, request): - depends(request, ["init_dflt_config"], scope="session") - - # We include tls_opt_no_session_reuse_required because python - ftplib has a long-running
issue with support for it. - tls_setup = { - "tls": True, - "tls_opt_no_session_reuse_required": True, - "ssltls_certificate": 1 - } - with ftp_user_ds_and_srvr_conn('ftpslocalDS', 'FTPSlocaluser', tls_setup, useFTPS=True) as tlsftp: - ftpsObj = tlsftp.ftp - login_error = None - try: - res = ftpsObj.login(user='FTPSlocaluser', passwd="secret") - assert res.startswith('230') - # local users should get the welcome message - assert tlsftp.motd.splitlines()[0] in res - ftpusers = ftp_get_users() - assert "FTPSlocaluser" == ftpusers[0]['user'] - - # Run the user tests with updated data - yield init_test_data('FTPS', tlsftp) - except all_errors as e: - login_error = e - assert login_error is None - - -@pytest.mark.skip(reason="Enable this when Jenkins infrastructure is better able to handle this test") -def test_085_ftp_service_starts_after_reboot(): - ''' - NAS-123024 - There is a bug in the Debian Bookworm proftpd install package - that enables proftpd.socket which blocks proftpd.service from starting. - - We fixed this by disabling proftpd.socket. There is a different fix - in a Bookworm update that involves refactoring the systemd unit files. - ''' - with ftp_server(True): # start ftp and configure it to start at boot - rv = query_ftp_service() - assert rv['state'] == 'RUNNING' - assert rv['enable'] is True - - reboot(truenas_server.ip) - - # wait for box to reboot - max_wait = 60 - ftp_state = None - for retry in range(max_wait): - try: - ftp_state = query_ftp_service() - break - except Exception: - sleep(1) - continue - - # make sure ftp service started after boot - assert ftp_state, f'Failed to query ftp service state after {max_wait!r} seconds' - assert ftp_state['state'] == 'RUNNING', f'Expected ftp service to be running, found {ftp_state["state"]!r}' - - -def test_100_ftp_service_stop(): - call('service.stop', 'ftp', {'silent': False}) - rv = query_ftp_service() - assert rv['state'] == 'STOPPED' - assert rv['enable'] is False diff --git a/tests/api2/test_210_group.py b/tests/api2/test_210_group.py deleted file mode 100644 index 3578ea8d9023a..0000000000000 --- a/tests/api2/test_210_group.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD -# Tests for the FreeNAS REST API - -import sys -import os -import json -import pytest -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST, PUT, DELETE, SSH_TEST -from auto_config import user, password -from middlewared.test.integration.utils import call -from pytest_dependency import depends - - -def test_01_get_next_gid(): - results = GET('/group/get_next_gid/') - assert results.status_code == 200, results.text - global next_gid - next_gid = results.json() - - -# Create tests -def test_02_creating_group_testgroup(): - global groupid - payload = { - "gid": next_gid, - "name": "testgroup", - "smb": False, - } - results = POST("/group/", payload) - assert results.status_code == 200, results.text - groupid = results.json() - - -def test_03_look_group_is_created(): - assert len(GET('/group?group=testgroup').json()) == 1 - - -def test_04_check_group_exists(): - """ - get_group_obj is a wrapper around the grp module. - This check verifies that the group is _actually_ created.
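- A successful lookup therefore proves the group exists at the NSS level, - not merely in the config database; conversely, querying a missing group - makes this endpoint return HTTP 500 (exercised in test_15 below).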
- """ - payload = { - "groupname": "testgroup" - } - results = POST("/group/get_group_obj/", payload) - assert results.status_code == 200, results.text - if results.status_code == 200: - gr = results.json() - assert gr['gr_gid'] == next_gid, results.text - - -def test_05_get_group_info(): - global groupinfo - groupinfo = GET('/group?group=testgroup').json()[0] - - -def test_06_look_group_name(): - assert groupinfo["group"] == "testgroup" - - -def test_07_look_group_full_name(): - assert groupinfo["gid"] == next_gid - - -def test_08_look_for_testgroup_is_in_freenas_group(request): - result = GET( - '/group', payload={ - 'query-filters': [['name', '=', 'testgroup']], - 'query-options': { - 'get': True, - 'extra': {'additional_information': ['SMB']} - } - } - ) - assert result.status_code == 200, result.text - assert result.json()['sid'] is None, result.text - - -def test_09_get_new_next_gid(): - results = GET('/group/get_next_gid/') - assert results.status_code == 200, results.text - global new_next_gid - new_next_gid = results.json() - - -def test_10_next_gid_and_new_next_gid_not_equal(): - assert new_next_gid != next_gid - - -# Update the testgroup -def test_11_updating_group_testgroup(): - payload = { - "gid": new_next_gid, - "name": "newgroup" - } - results = PUT("/group/id/%s" % groupid, payload) - assert results.status_code == 200, results.text - - -def test_12_get_group_new_info(): - global groupinfo - groupinfo = GET('/group?group=newgroup').json()[0] - - -def test_13_look_group_name(): - assert groupinfo["group"] == "newgroup" - - -def test_14_look_user_new_uid(): - assert groupinfo["gid"] == new_next_gid - - -def test_15_look_for_testgroup_is_not_in_freenas_group(request): - payload = { - "groupname": "testgroup" - } - results = POST("/group/get_group_obj/", payload) - assert results.status_code == 500, results.text - - -def test_16_look_for_newgroup_is_in_freenas_group(request): - payload = { - "groupname": "newgroup" - } - results = POST("/group/get_group_obj/", payload) - assert results.status_code == 200, results.text - - -# Delete the group -def test_19_delete_group_testgroup_newgroup(): - results = DELETE(f"/group/id/{groupid}/", {"delete_users": True}) - assert results.status_code == 200, results.text - - -@pytest.mark.parametrize('group', [ - {"root": 0}, - {"wheel": 0}, - {"nogroup": 65534}, - {"nobody": 65534} -]) -def test_35_check_builtin_groups(group): - """ - This check verifies the existence of targeted built-in groups - """ - g_name, g_id = list(group.items())[0] - gr = call("group.get_group_obj", {"groupname": g_name}) - assert gr['gr_gid'] == g_id, f"{g_name}: expected gid {g_id}, but got {gr['gr_gid']}" - - -@pytest.mark.parametrize('nss_obj', [ - ('group', 'root', 0), - ('group', 'nogroup', 65534) -]) -def test_36_check_builtin_duplicate_id_order(nss_obj): - # For compatibility with FreeBSD-based SCALE versions we - # map "wheel" to gid 0 and "nogroup" to gid 65534. This validate - # lookups by gid to return expected Linux names. 
- nss_type, name, xid = nss_obj - if nss_type == "group": - xid_key = "gid" - name_key = "gr_name" - else: - xid_key = "uid" - name_key = "pw_name" - - obj = call(f"{nss_type}.get_{nss_type}_obj", {xid_key: xid}) - assert obj[name_key] == name diff --git a/tests/api2/test_230_idmap.py b/tests/api2/test_230_idmap.py deleted file mode 100644 index d3109bb685ac2..0000000000000 --- a/tests/api2/test_230_idmap.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST - -try: - from config import ( - LDAPBASEDN, - LDAPBINDDN, - LDAPBINDPASSWORD, - LDAPHOSTNAME, - ) -except ImportError: - Reason = 'LDAP* variables are not set up in config.py' - # comment out pytestmark for development testing with --dev-test - pytestmark = pytest.mark.skipif(True, reason=Reason) - - -def test_create_idmap_certificate(): - global idmap_id - payload = { - 'name': 'BOB.NB', - 'range_low': 1000, - 'range_high': 2000, - 'certificate': 1, - "idmap_backend": "RFC2307", - 'options': { - "ldap_server": "STANDALONE", - "bind_path_user": LDAPBASEDN, - "bind_path_group": LDAPBASEDN, - "ldap_url": LDAPHOSTNAME, - "ldap_user_dn": LDAPBINDDN, - "ldap_user_dn_password": LDAPBINDPASSWORD, - "ssl": "ON", - "ldap_realm": False, - } - } - results = POST('/idmap/', payload) - assert results.status_code == 200, results.text - idmap_id = results.json()['id'] - - -def test_02_delete_used_certificate(): - results = DELETE(f'/idmap/id/{idmap_id}/') - assert results.status_code == 200, results.text - - -def test_03_verify_delete_job(): - results = GET(f'/idmap/id/{idmap_id}/') - assert results.status_code == 404, results.text diff --git a/tests/api2/test_260_iscsi.py b/tests/api2/test_260_iscsi.py deleted file mode 100644 index 1c38d4dcb755d..0000000000000 --- a/tests/api2/test_260_iscsi.py +++ /dev/null @@ -1,647 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import os -import pytest -import random -import string -import sys -from time import sleep -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from auto_config import pool_name, hostname -from functions import PUT, POST, GET, SSH_TEST, DELETE -from middlewared.test.integration.utils.client import truenas_server - -try: - Reason = 'BSD host configuration is missing in ixautomation.conf' - from config import BSD_HOST, BSD_USERNAME, BSD_PASSWORD - bsd_host_cfg = pytest.mark.skipif(False, reason=Reason) -except ImportError: - bsd_host_cfg = pytest.mark.skipif(True, reason=Reason) - -digit = ''.join(random.choices(string.digits, k=2)) - -file_mountpoint = f'/tmp/iscsi-file-{hostname}' -zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}' -target_name = f"target{digit}" -basename = "iqn.2005-10.org.freenas.ctl" -zvol_name = f"ds{digit}" -zvol = f'{pool_name}/{zvol_name}' -zvol_url = zvol.replace('/', '%2F') - - -def has_session_present(target): - results = GET( - "/iscsi/global/sessions", payload={ - 'query-filters': [['target', '=', target]], - }) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - return bool(len(results.json())) - - -def waiting_for_iscsi_to_disconnect(base_target, wait): - timeout = 0 - # First, check that the client no longer sees the target logged in - while timeout < wait: - cmd = 'iscsictl -L' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if base_target not in results['output']: - break - timeout += 1 - sleep(1) - # Next,
check that the SCALE does not see a session to the target - while timeout < wait: - if not has_session_present(base_target): - return True - timeout += 1 - sleep(1) - else: - return False - - -@pytest.mark.dependency(name="iscsi_01") -def test_01_Add_iSCSI_initiator(): - global initiator_id - payload = { - 'comment': 'Default initiator', - } - results = POST("/iscsi/initiator/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - initiator_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_02") -def test_02_Add_iSCSI_portal(request): - depends(request, ["iscsi_01"]) - global portal_id - payload = { - 'listen': [ - { - 'ip': '0.0.0.0', - } - ] - } - results = POST("/iscsi/portal/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - portal_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_03") -def test_03_Add_iSCSI_target(request): - depends(request, ["iscsi_02"]) - global target_id - payload = { - 'name': target_name, - 'groups': [ - {'portal': portal_id} - ] - } - results = POST("/iscsi/target/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - target_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_04") -def test_04_Add_a_iSCSI_file_extent(request): - depends(request, ["iscsi_03"], scope="session") - global extent_id - payload = { - 'type': 'FILE', - 'name': 'extent', - 'filesize': 536870912, - 'path': f'/mnt/{pool_name}/dataset03/iscsi' - } - results = POST("/iscsi/extent/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - extent_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_05") -def test_05_Associate_iSCSI_target(request): - depends(request, ["iscsi_04"], scope="session") - global associate_id - payload = { - 'target': target_id, - 'lunid': 1, - 'extent': extent_id - } - results = POST("/iscsi/targetextent/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - associate_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_06") -def test_06_Enable_iSCSI_service(request): - depends(request, ["iscsi_05"]) - payload = {"enable": True} - results = PUT("/service/id/iscsitarget/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(name="iscsi_07") -def test_07_start_iSCSI_service(request): - depends(request, ["iscsi_05"]) - result = POST( - '/service/start', { - 'service': 'iscsitarget', - } - ) - assert result.status_code == 200, result.text - sleep(1) - - -def test_08_Verify_the_iSCSI_service_is_enabled(request): - depends(request, ["iscsi_05"]) - results = GET("/service/?service=iscsitarget") - assert results.status_code == 200, results.text - assert results.json()[0]["state"] == "RUNNING", results.text - - -@bsd_host_cfg -@pytest.mark.dependency(name="iscsi_09") -def test_09_Connecting_to_iSCSI_target(request): - depends(request, ["iscsi_05"], scope='session') - cmd = f'iscsictl -A -p {truenas_server.ip}:3260 -t {basename}:{target_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.timeout(20) -@pytest.mark.dependency(name="iscsi_10") -def 
test_10_Waiting_for_iscsi_connection_before_grabbing_device_name(request): - depends(request, ["iscsi_09"], scope='session') - global file_device_name - file_device_name = "" - while True: - cmd = f'iscsictl -L | grep "{basename}:{target_name}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - iscsictl_list = results['stdout'].strip().split() - if iscsictl_list[2] == "Connected:": - file_device_name = iscsictl_list[3] - assert True - break - sleep(1) - while True: - cmd = f'test -e /dev/{file_device_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if results['result']: - assert True - break - - -@bsd_host_cfg -def test_11_Format_the_target_volume(request): - depends(request, ["iscsi_10"], scope='session') - cmd = f'umount "/media/{file_device_name}"' - SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - cmd2 = f'newfs "/dev/{file_device_name}"' - results = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_12_Creating_iSCSI_mountpoint(request): - depends(request, ["iscsi_10"], scope='session') - cmd = f'mkdir -p {file_mountpoint}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.timeout(10) -def test_13_Mount_the_target_volume(request): - depends(request, ["iscsi_10"], scope='session') - cmd = f'mount "/dev/{file_device_name}" "{file_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_14_Creating_file(request): - depends(request, ["iscsi_10"], scope='session') - cmd = 'touch "%s/testfile"' % file_mountpoint - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_15_Moving_file(request): - depends(request, ["iscsi_10"], scope='session') - cmd = 'mv "%s/testfile" "%s/testfile2"' % (file_mountpoint, file_mountpoint) - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_16_Copying_file(request): - depends(request, ["iscsi_10"], scope='session') - cmd = 'cp "%s/testfile2" "%s/testfile"' % (file_mountpoint, file_mountpoint) - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_17_Deleting_file(request): - depends(request, ["iscsi_10"], scope='session') - results = SSH_TEST('rm "%s/testfile2"' % file_mountpoint, - BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_19_Unmounting_iSCSI_volume(request): - depends(request, ["iscsi_10"], scope='session') - cmd = f'umount "{file_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - sleep(1) - - -@bsd_host_cfg -def test_20_Removing_iSCSI_volume_mountpoint(request): - depends(request, ["iscsi_10"], scope='session') - cmd = f'rm -rf "{file_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - 
assert results['result'] is True, f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_21_Disconnect_iSCSI_target(request): - depends(request, ["iscsi_09"], scope='session') - cmd = f'iscsictl -R -t {basename}:{target_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'] is True, f"{results['output']}, {results['stderr']}" - # Currently FreeBSD (13.1-RELEASE-p5) does *not* issue a LOGOUT (verified by - # network capture), so give the target time to react. SCST will log an error, e.g. - # iscsi-scst: ***ERROR***: Connection 00000000e749085f with initiator iqn.1994-09.org.freebsd:freebsd13.local unexpectedly closed! - assert waiting_for_iscsi_to_disconnect(f'{basename}:{target_name}', 30) - - -def test_25_Delete_associate_iSCSI_file_targetextent(request): - depends(request, ["iscsi_05"], scope="session") - results = DELETE(f"/iscsi/targetextent/id/{associate_id}/", False) - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_26_Delete_iSCSI_file_target(request): - depends(request, ["iscsi_03"]) - results = DELETE(f"/iscsi/target/id/{target_id}/", False) - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_27_Delete_iSCSI_file_extent(request): - depends(request, ["iscsi_04"]) - results = DELETE(f"/iscsi/extent/id/{extent_id}/") - assert results.status_code == 200, results.text - assert results.json(), results.text - - -@pytest.mark.dependency(name="iscsi_28") -def test_28_creating_zvol_for_the_iscsi_share(request): - global results, payload - payload = { - 'name': zvol, - 'type': 'VOLUME', - 'volsize': 655360, - 'volblocksize': '16K' - } - results = POST("/pool/dataset/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(name="iscsi_29") -def test_29_add_iscsi_zvol_target(request): - depends(request, ["iscsi_28"]) - global zvol_target_id - payload = { - 'name': zvol_name, - 'groups': [ - {'portal': portal_id} - ] - } - results = POST("/iscsi/target/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - zvol_target_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_30") -def test_30_add_iscsi_a_zvol_extent(request): - depends(request, ["iscsi_28"]) - global zvol_extent_id - payload = { - 'type': 'DISK', - 'disk': f'zvol/{zvol}', - 'name': 'zvol_extent', - # 'filesize': 536870912, - # 'path': zvol - } - results = POST("/iscsi/extent/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - zvol_extent_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_31") -def test_31_associate_iscsi_zvol_target_and_zvol_extent(request): - depends(request, ["iscsi_30"]) - global zvol_associate_id - payload = { - 'target': zvol_target_id, - 'lunid': 1, - 'extent': zvol_extent_id - } - results = POST("/iscsi/targetextent/", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict), results.text - zvol_associate_id = results.json()['id'] - - -@pytest.mark.dependency(name="iscsi_32") -def test_32_restart_iscsi_service(request): - depends(request, ["iscsi_31"]) - result = POST('/service/restart', {'service': 'iscsitarget'}) - assert result.status_code == 200, result.text - sleep(1) - - -def test_33_verify_the_iscsi_service_is_running(request): - depends(request, ["iscsi_32"]) - results = 
GET("/service/?service=iscsitarget") - assert results.status_code == 200, results.text - assert results.json()[0]["state"] == "RUNNING", results.text - - -@bsd_host_cfg -@pytest.mark.dependency(name="iscsi_34") -def test_34_connecting_to_the_zvol_iscsi_target(request): - depends(request, ["iscsi_32"]) - cmd = f'iscsictl -A -p {truenas_server.ip}:3260 -t {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.timeout(20) -@pytest.mark.dependency(name="iscsi_35") -def test_35_waiting_for_iscsi_connection_before_grabbing_device_name(request): - depends(request, ["iscsi_34"]) - global zvol_device_name - zvol_device_name = "" - while True: - cmd = f'iscsictl -L | grep {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if results['result'] and "Connected:" in results['output']: - zvol_device_name = results['stdout'].strip().split()[3] - assert True - break - sleep(1) - while True: - cmd = f'test -e /dev/{zvol_device_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if results['result']: - assert True - break - - -@bsd_host_cfg -def test_36_format_the_target_volume(request): - depends(request, ["iscsi_35"]) - cmd = f'umount "/media/{file_device_name}"' - SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - cmd = f'newfs "/dev/{zvol_device_name}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.dependency(name="iscsi_37") -def test_37_creating_iscsi_mountpoint(request): - depends(request, ["iscsi_35"]) - cmd = f'mkdir -p {zvol_mountpoint}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.timeout(10) -@pytest.mark.dependency(name="iscsi_38") -def test_38_mount_the_zvol_target_volume(request): - depends(request, ["iscsi_37"]) - cmd = f'mount /dev/{zvol_device_name} {zvol_mountpoint}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_39_creating_file_in_zvol_iscsi_share(request): - depends(request, ["iscsi_38"]) - cmd = f'touch "{zvol_mountpoint}/myfile.txt"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_40_moving_file_in_zvol_iscsi_share(request): - depends(request, ["iscsi_38"]) - cmd = f'mv "{zvol_mountpoint}/myfile.txt" "{zvol_mountpoint}/newfile.txt"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_41_creating_a_directory_in_zvol_iscsi_share(request): - depends(request, ["iscsi_38"]) - cmd = f'mkdir "{zvol_mountpoint}/mydir"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_42_copying_file_to_new_dir_in_zvol_iscsi_share(request): - depends(request, ["iscsi_38"]) - cmd = f'cp "{zvol_mountpoint}/newfile.txt" "{zvol_mountpoint}/mydir/myfile.txt"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def 
test_44_unmounting_the_zvol_iscsi_volume(request): - depends(request, ["iscsi_38"]) - cmd = f'umount "{zvol_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_45_verify_the_zvol_mountpoint_is_empty(request): - depends(request, ["iscsi_38"]) - cmd = f'test -f {zvol_mountpoint}/newfile.txt' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert not results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_46_disconnect_iscsi_zvol_target(request): - depends(request, ["iscsi_34"]) - cmd = f'iscsictl -R -t {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - assert waiting_for_iscsi_to_disconnect(f'{basename}:{zvol_name}', 30) - - -@bsd_host_cfg -@pytest.mark.dependency(name="iscsi_47") -def test_47_connecting_to_the_zvol_iscsi_target(request): - depends(request, ["iscsi_32"]) - cmd = f'iscsictl -A -p {truenas_server.ip}:3260 -t {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -@pytest.mark.timeout(20) -@pytest.mark.dependency(name="iscsi_48") -def test_48_waiting_for_iscsi_connection_before_grabbing_device_name(request): - depends(request, ["iscsi_34"]) - global zvol_device_name - zvol_device_name = "" - while True: - cmd = f'iscsictl -L | grep {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if results['result'] and "Connected:" in results['output']: - zvol_device_name = results['stdout'].strip().split()[3] - assert True - break - sleep(1) - while True: - cmd = f'test -e /dev/{zvol_device_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - if results['result']: - assert True - break - - -@bsd_host_cfg -@pytest.mark.timeout(15) -@pytest.mark.dependency(name="iscsi_50") -def test_50_remount_the_zvol_target_volume(request): - depends(request, ["iscsi_48"]) - cmd = f'umount "/media/{file_device_name}"' - SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - cmd = f'mount /dev/{zvol_device_name} {zvol_mountpoint}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def test_51_verify_files_and_directory_was_kept_on_the_zvol_iscsi_share(request): - depends(request, ["iscsi_50"]) - cmd1 = f'test -f {zvol_mountpoint}/newfile.txt' - results1 = SSH_TEST(cmd1, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results1['result'], results1['output'] - cmd2 = f'test -f "{zvol_mountpoint}/mydir/myfile.txt"' - results2 = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results2['result'], results2['output'] - - -@bsd_host_cfg -def test_52_unmounting_the_zvol_iscsi_volume(request): - depends(request, ["iscsi_50"]) - cmd = f'umount "{zvol_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - sleep(1) - - -@bsd_host_cfg -def test_53_removing_iscsi_volume_mountpoint(request): - depends(request, ["iscsi_50"]) - cmd = f'rm -rf "{zvol_mountpoint}"' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - - -@bsd_host_cfg -def 
test_54_redisconnect_iscsi_zvol_target(request): - depends(request, ["iscsi_47"]) - cmd = f'iscsictl -R -t {basename}:{zvol_name}' - results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST) - assert results['result'], f"{results['output']}, {results['stderr']}" - assert waiting_for_iscsi_to_disconnect(f'{basename}:{zvol_name}', 30) - - -def test_55_disable_iscsi_service(request): - depends(request, ["iscsi_06"]) - payload = {'enable': False} - results = PUT("/service/id/iscsitarget/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(name="iscsi_56") -def test_56_stop_iscsi_service(request): - depends(request, ["iscsi_32"]) - results = POST('/service/stop/', {'service': 'iscsitarget'}) - assert results.status_code == 200, results.text - sleep(1) - - -def test_57_verify_the_iscsi_service_is_disabled(request): - depends(request, ["iscsi_56"]) - results = GET("/service/?service=iscsitarget") - assert results.status_code == 200, results.text - assert results.json()[0]["state"] == "STOPPED", results.text - - -def test_58_delete_associate_iscsi_zvol_target_and_zvol_extent(request): - depends(request, ["iscsi_31"]) - results = DELETE(f"/iscsi/targetextent/id/{zvol_associate_id}/", True) - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_59_delete_iscsi_zvol_target(request): - depends(request, ["iscsi_29"]) - results = DELETE(f"/iscsi/target/id/{zvol_target_id}/", True) - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_60_delete_iscsi_zvol_extent(request): - depends(request, ["iscsi_30"]) - results = DELETE(f"/iscsi/extent/id/{zvol_extent_id}/") - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_61_delete_portal(request): - depends(request, ["iscsi_02"]) - results = DELETE(f"/iscsi/portal/id/{portal_id}/") - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_62_delete_iscsi_initiator(request): - depends(request, ["iscsi_01"]) - results = DELETE(f"/iscsi/initiator/id/{initiator_id}/") - assert results.status_code == 200, results.text - assert results.json(), results.text - - -def test_63_delete_the_zvol_device_by_id(request): - depends(request, ["iscsi_28"]) - results = DELETE(f'/pool/dataset/id/{zvol_url}') - assert results.status_code == 200, results.text diff --git a/tests/api2/test_261_iscsi_cmd.py b/tests/api2/test_261_iscsi_cmd.py deleted file mode 100644 index 045a927fb9d70..0000000000000 --- a/tests/api2/test_261_iscsi_cmd.py +++ /dev/null @@ -1,2736 +0,0 @@ -import contextlib -import enum -import errno -import ipaddress -import os -import random -import socket -import string -from time import sleep - -import iscsi -import pyscsi -import pytest -import requests -from assets.websocket.iscsi import (alua_enabled, initiator, initiator_portal, - portal, read_capacity16, target, - target_extent_associate, verify_capacity, - verify_luns) -from middlewared.service_exception import InstanceNotFound, ValidationError, ValidationErrors -from middlewared.test.integration.assets.iscsi import target_login_test -from middlewared.test.integration.assets.pool import dataset, snapshot -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server -from pyscsi.pyscsi.scsi_sense import sense_ascq_dict -from pytest_dependency import depends - -from auto_config import ha, hostname, isns_ip, password,
pool_name, user -from functions import SSH_TEST -from protocols import (initiator_name_supported, iscsi_scsi_connection, - isns_connection) - -# Setup some flags that will enable/disable tests based upon the capabilities of the -# python-scsi package in use -try: - from pyscsi.pyscsi.scsi_cdb_persistentreservein import PR_SCOPE, PR_TYPE - pyscsi_has_persistent_reservations = 'PersistentReserveOut' in dir(pyscsi.pyscsi.scsi) - LU_SCOPE = PR_SCOPE.LU_SCOPE -except ImportError: - pyscsi_has_persistent_reservations = False - LU_SCOPE = 0 -skip_persistent_reservations = pytest.mark.skipif(not pyscsi_has_persistent_reservations, - reason="PYSCSI does not support persistent reservations") - -skip_multi_initiator = pytest.mark.skipif(not initiator_name_supported(), - reason="PYSCSI does not support setting distinct initiator names") - -skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests") - - -skip_invalid_initiatorname = pytest.mark.skipif(not initiator_name_supported(), - reason="Invalid initiatorname will be presented") - -pyscsi_has_report_target_port_groups = 'ReportTargetPortGroups' in dir(pyscsi.pyscsi.scsi) - -# See: https://github.com/python-scsi/cython-iscsi/pull/8 -pyscsi_supports_check_condition = hasattr(iscsi.Task, 'raw_sense') -skip_no_check_condition = pytest.mark.skipif(not pyscsi_supports_check_condition, reason="PYSCSI does not support CHECK CONDITION") - - -# The following strings are taken from pyscsi/pyscsi/scsi_exception -class CheckType(enum.Enum): - CHECK_CONDITION = "CheckCondition" - CONDITIONS_MET = "ConditionsMet" - BUSY_STATUS = "BusyStatus" - RESERVATION_CONFLICT = "ReservationConflict" - TASK_SET_FULL = "TaskSetFull" - ACA_ACTIVE = "ACAActive" - TASK_ABORTED = "TaskAborted" - - def __str__(self): - return self.value - - -# Some constants -MB = 1024 * 1024 -MB_100 = 100 * MB -MB_200 = 200 * MB -MB_256 = 256 * MB -MB_512 = 512 * MB -PR_KEY1 = 0xABCDEFAABBCCDDEE -PR_KEY2 = 0x00000000DEADBEEF -CONTROLLER_A_TARGET_PORT_GROUP_ID = 101 -CONTROLLER_B_TARGET_PORT_GROUP_ID = 102 - -# Some variables -digit = ''.join(random.choices(string.digits, k=2)) -file_mountpoint = f'/tmp/iscsi-file-{hostname}' -zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}' -target_name = f"target{digit}" -dataset_name = f"iscsids{digit}" -file_name = f"iscsi{digit}" -basename = "iqn.2005-10.org.freenas.ctl" -zvol_name = f"ds{digit}" -zvol = f'{pool_name}/{zvol_name}' - - -def snapshot_rollback(snapshot_id): - call('zfs.snapshot.rollback', snapshot_id) - - -def other_node(node): - if node == 'A': - return 'B' - if node == 'B': - return 'A' - raise ValueError("Invalid node supplied") - - -def get_ip_addr(ip): - try: - ipaddress.ip_address(ip) - return ip - except ValueError: - actual_ip = socket.gethostbyname(ip) - ipaddress.ip_address(actual_ip) - return actual_ip - - -@contextlib.contextmanager -def iscsi_auth(tag, user, secret, peeruser=None, peersecret=None): - payload = { - 'tag': tag, - 'user': user, - 'secret': secret, - } - if peeruser and peersecret: - payload.update({ - 'peeruser': peeruser, - 'peersecret': peersecret - }) - auth_config = call('iscsi.auth.create', payload) - - try: - yield auth_config - finally: - call('iscsi.auth.delete', auth_config['id']) - - -@contextlib.contextmanager -def file_extent(pool_name, dataset_name, file_name, filesize=MB_512, extent_name='extent', serial=None): - payload = { - 'type': 'FILE', - 'name': extent_name, - 'filesize': filesize, - 'path': f'/mnt/{pool_name}/{dataset_name}/{file_name}' - } - # We want to allow any
non-None serial to be specified (even '') - if serial is not None: - payload.update({'serial': serial}) - extent_config = call('iscsi.extent.create', payload) - - try: - yield extent_config - finally: - call('iscsi.extent.delete', extent_config['id'], True, True) - - -@contextlib.contextmanager -def zvol_dataset(zvol, volsize=MB_512, recursive=False, force=False): - payload = { - 'name': zvol, - 'type': 'VOLUME', - 'volsize': volsize, - 'volblocksize': '16K' - } - dataset_config = call('pool.dataset.create', payload) - - try: - yield dataset_config - finally: - try: - call('pool.dataset.delete', dataset_config['id'], {'recursive': recursive, 'force': force}) - except InstanceNotFound: - pass - - -def modify_extent(ident, payload): - call('iscsi.extent.update', ident, payload) - - -def file_extent_resize(ident, filesize): - payload = { - 'filesize': filesize, - } - modify_extent(ident, payload) - - -def extent_disable(ident): - modify_extent(ident, {'enabled': False}) - - -def extent_enable(ident): - modify_extent(ident, {'enabled': True}) - - -def zvol_resize(zvol, volsize): - payload = { - 'volsize': volsize, - } - call('pool.dataset.update', zvol, payload) - - -def _get_iscsi_sessions(filters=None): - if filters: - return call('iscsi.global.sessions', filters) - else: - return call('iscsi.global.sessions') - - -def get_iscsi_sessions(filters=None, check_length=None): - if isinstance(check_length, int): - for _ in range(10): - data = _get_iscsi_sessions(filters) - if len(data) == check_length: - return data - sleep(1) - assert len(data) == check_length, data - else: - data = _get_iscsi_sessions(filters) - return data - - -def get_client_count(): - return call('iscsi.global.client_count') - - -def get_volthreading(zvolid): - return call('zfs.dataset.query', [['id', '=', zvolid]], {'get': True})['properties']['volthreading']['value'] - - -def verify_client_count(count, retries=10): - """Verify that the client count is the expected value, but include some - retries to allow things to settle if necessary.""" - assert retries > 0 - while retries: - if get_client_count() == count: - # All is good - return - retries -= 1 - sleep(1) - assert get_client_count() == count - - -@contextlib.contextmanager -def zvol_extent(zvol, extent_name='zvol_extent'): - payload = { - 'type': 'DISK', - 'disk': f'zvol/{zvol}', - 'name': extent_name, - } - extent_config = call('iscsi.extent.create', payload) - - try: - yield extent_config - finally: - try: - call('iscsi.extent.delete', extent_config['id'], True, True) - except InstanceNotFound: - pass - - -@contextlib.contextmanager -def configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name, alias=None, filesize=MB_512, extent_name='extent'): - portal_id = config['portal']['id'] - with target(target_name, [{'portal': portal_id}], alias) as target_config: - target_id = target_config['id'] - with dataset(dataset_name) as dataset_config: - with file_extent(pool_name, dataset_name, file_name, filesize=filesize, extent_name=extent_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - newconfig = config.copy() - newconfig.update({ - 'target': target_config, - 'dataset': dataset_config, - 'extent': extent_config, - }) - yield newconfig - - -@contextlib.contextmanager -def add_file_extent_target_lun(config, lun, filesize=MB_512, extent_name=None): - name = config['target']['name'] - target_id = config['target']['id'] - dataset_name = f"iscsids{name}" - lun_file_name = 
f'{name}_lun{lun}' - if not extent_name: - extent_name = lun_file_name - with file_extent(pool_name, dataset_name, lun_file_name, filesize=filesize, extent_name=extent_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id, lun): - newconfig = config.copy() - newconfig.update({ - f'extent_lun{lun}': extent_config, - }) - yield newconfig - - -@contextlib.contextmanager -def configured_target_to_zvol_extent(config, target_name, zvol, alias=None, extent_name='zvol_extent', volsize=MB_512): - portal_id = config['portal']['id'] - with target(target_name, [{'portal': portal_id}], alias) as target_config: - target_id = target_config['id'] - with zvol_dataset(zvol, volsize) as dataset_config: - with zvol_extent(zvol, extent_name=extent_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id) as associate_config: - newconfig = config.copy() - newconfig.update({ - 'associate': associate_config, - 'target': target_config, - 'dataset': dataset_config['id'], - 'extent': extent_config, - }) - yield newconfig - - -@contextlib.contextmanager -def add_zvol_extent_target_lun(config, lun, volsize=MB_512, extent_name=None): - name = config['target']['name'] - zvol_name = f"ds{name}" - zvol = f'{pool_name}/{zvol_name}_lun{lun}' - target_id = config['target']['id'] - lun_file_name = f'{name}_lun{lun}' - if not extent_name: - extent_name = lun_file_name - with zvol_dataset(zvol, volsize) as dataset_config: - with zvol_extent(zvol, extent_name=extent_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id, lun) as associate_config: - newconfig = config.copy() - newconfig.update({ - f'dataset_lun{lun}': dataset_config, - f'associate_lun{lun}': associate_config, - f'extent_lun{lun}': extent_config, - }) - yield newconfig - - -@contextlib.contextmanager -def configured_target(config, name, extent_type, alias=None, extent_size=MB_512): - assert extent_type in ["FILE", "VOLUME"] - if extent_type == "FILE": - ds_name = f"iscsids{name}" - with configured_target_to_file_extent(config, name, pool_name, ds_name, file_name, alias, extent_size, name) as newconfig: - yield newconfig - elif extent_type == "VOLUME": - zvol_name = f"ds{name}" - zvol = f'{pool_name}/{zvol_name}' - with configured_target_to_zvol_extent(config, name, zvol, alias, name, extent_size) as newconfig: - yield newconfig - - -@contextlib.contextmanager -def isns_enabled(delay=5): - payload = {'isns_servers': [isns_ip]} - call('iscsi.global.update', payload) - try: - yield - finally: - payload = {'isns_servers': []} - call('iscsi.global.update', payload) - if delay: - print(f'Sleeping for {delay} seconds after turning off iSNS') - sleep(delay) - - -def TUR(s): - """ - Perform a TEST UNIT READY. - - :param s: a pyscsi.SCSI instance - """ - s.testunitready() - # try: - # s.testunitready() - # except TypeError: - # s.testunitready() - - -def expect_check_condition(s, text=None, check_type=CheckType.CHECK_CONDITION): - """ - Expect a CHECK CONDITION containing the specified text. - - :param s: a pyscsi.SCSI instance - :param text: string expected as part of the CHECK CONDITION - :param check_type: CheckType enum of the expected CHECK_CONDITION - - Issue a TEST UNIT READY and verify that the expected CHECK CONDITION is raised. - - If this version of pyscsi(/cython-iscsi) does not support CHECK CONDITION - then just swallow the condition by issuing another TEST UNIT READY. 
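- - A minimal illustrative call (this particular ASC/ASCQ code is only an - example, not one these tests necessarily trigger): - - expect_check_condition(s, sense_ascq_dict[0x2900]) # POWER ON, RESET...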
- """ - assert check_type in CheckType, f"Parameter '{check_type}' is not a CheckType" - if pyscsi_supports_check_condition: - with pytest.raises(Exception) as excinfo: - s.testunitready() - - e = excinfo.value - assert e.__class__.__name__ == str(check_type), f"Unexpected CHECK CONDITION type. Got '{e.__class__.__name__}', expected {str(check_type)}" - if text: - assert text in str(e), f"Exception did not match: {text}" - else: - # If we cannot detect a CHECK CONDITION, then swallow it by retrying a TUR - try: - s.testunitready() - except TypeError: - s.testunitready() - - -def _verify_inquiry(s): - """ - Verify that the supplied SCSI has the expected INQUIRY response. - - :param s: a pyscsi.SCSI instance - """ - TUR(s) - r = s.inquiry() - data = r.result - assert data['t10_vendor_identification'].decode('utf-8').startswith("TrueNAS"), str(data) - assert data['product_identification'].decode('utf-8').startswith("iSCSI Disk"), str(data) - - -def get_target(targetid): - """ - Return target JSON data. - """ - return call('iscsi.target.get_instance', int(targetid)) - - -def get_targets(): - """ - Return a dictionary of target JSON data, keyed by target name. - """ - return {target['name']: target for target in call('iscsi.target.query')} - - -def modify_target(targetid, payload): - call('iscsi.target.update', targetid, payload) - - -def set_target_alias(targetid, newalias): - modify_target(targetid, {'alias': newalias}) - - -def set_target_initiator_id(targetid, initiatorid): - target_data = get_target(targetid) - - assert 'groups' in target_data, target_data - groups = target_data['groups'] - assert len(groups) == 1, target_data - - groups[0]['initiator'] = initiatorid - modify_target(targetid, {'groups': groups}) - - -def _get_service(service_name='iscsitarget'): - return call('service.query', [['service', '=', service_name]], {'get': True}) - - -@pytest.mark.dependency(name="iscsi_cmd_00") -def test_00_setup(request): - # Enable iSCSI service - payload = {"enable": True} - call('service.update', 'iscsitarget', payload) - # Start iSCSI service - call('service.start', 'iscsitarget') - sleep(1) - # Verify running - service = _get_service() - assert service['state'] == "RUNNING", service - - -def test_01_inquiry(request): - """ - This tests the Vendor and Product information in an INQUIRY response - are 'TrueNAS' and 'iSCSI Disk' respectively. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - with initiator(): - with portal() as portal_config: - portal_id = portal_config['id'] - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - with file_extent(pool_name, dataset_name, file_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - _verify_inquiry(s) - - -def test_02_read_capacity16(request): - """ - This tests that the target created returns the correct size to READ CAPACITY (16). - - It performs this test with a couple of sizes for both file & zvol based targets. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - with initiator(): - with portal() as portal_config: - portal_id = portal_config['id'] - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - # 100 MB file extent - with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_capacity(s, MB_100) - # 512 MB file extent - with file_extent(pool_name, dataset_name, file_name, MB_512) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_capacity(s, MB_512) - # 100 MB zvol extent - with zvol_dataset(zvol, MB_100): - with zvol_extent(zvol) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_capacity(s, MB_100) - # 512 MB zvol extent - with zvol_dataset(zvol): - with zvol_extent(zvol) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_capacity(s, MB_512) - - -def target_test_readwrite16(ip, iqn): - """ - This tests WRITE SAME (16), READ (16) and WRITE (16) - operations on the specified target. - """ - zeros = bytearray(512) - deadbeef = bytearray.fromhex('deadbeef') * 128 - deadbeef_lbas = [1, 5, 7] - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # First let's write zeros to the first 12 blocks using WRITE SAME (16) - s.writesame16(0, 12, zeros) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - # Now let's write DEADBEEF to a few LBAs using WRITE (16) - for lba in deadbeef_lbas: - s.write16(lba, 1, deadbeef) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Drop the iSCSI connection and login again - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Do a WRITE for > 1 LBA - s.write16(10, 2, deadbeef * 2) - - # Check results using READ (16) - deadbeef_lbas.extend([10, 11]) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Do a couple of READ (16) for > 1 LBA - # At this stage we have written deadbeef to LBAs 1,5,7,10,11 - r = s.read16(0, 2) - assert r.datain == zeros + deadbeef, r.datain - r = s.read16(1, 2) - assert r.datain == deadbeef + zeros, r.datain - r = s.read16(2, 2) - assert r.datain == zeros * 2, r.datain - r = s.read16(10, 2) - assert r.datain == deadbeef * 2, r.datain - - -def test_03_readwrite16_file_extent(request): - """ - This tests WRITE SAME (16), READ (16) and WRITE (16) operations with - a file extent based iSCSI target. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - with initiator_portal() as config: - with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name): - iqn = f'{basename}:{target_name}' - target_test_readwrite16(truenas_server.ip, iqn) - - -def test_04_readwrite16_zvol_extent(request): - """ - This tests WRITE SAME (16), READ (16) and WRITE (16) operations with - a zvol extent based iSCSI target. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol): - iqn = f'{basename}:{target_name}' - target_test_readwrite16(truenas_server.ip, iqn) - - -@skip_invalid_initiatorname -def test_05_chap(request): - """ - This tests that CHAP auth operates as expected. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - user = "user1" - secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10)) - with initiator(): - with portal() as portal_config: - portal_id = portal_config['id'] - auth_tag = 1 - with iscsi_auth(auth_tag, user, secret): - with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP', 'auth': auth_tag}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - with file_extent(pool_name, dataset_name, file_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - - # Try and fail to connect without supplying CHAP creds - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - TUR(s) - assert False, "Should not have been able to connect without CHAP credentials." - assert 'Unable to connect to' in str(ve), ve - - # Try and fail to connect supplying incorrect CHAP creds - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s: - TUR(s) - assert False, "Should not have been able to connect without CHAP credentials." - assert 'Unable to connect to' in str(ve), ve - - # Finally ensure we can connect with the right CHAP creds - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s: - _verify_inquiry(s) - - -@skip_invalid_initiatorname -def test_06_mutual_chap(request): - """ - This tests that Mutual CHAP auth operates as expected. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - user = "user1" - secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10)) - peer_user = "user2" - peer_secret = 'sec2' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10)) - with initiator(): - with portal() as portal_config: - portal_id = portal_config['id'] - auth_tag = 1 - with iscsi_auth(auth_tag, user, secret, peer_user, peer_secret): - with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP_MUTUAL', 'auth': auth_tag}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - with file_extent(pool_name, dataset_name, file_name) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - iqn = f'{basename}:{target_name}' - - # Try and fail to connect without supplying Mutual CHAP creds - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - TUR(s) - assert False, "Should not have been able to connect without CHAP credentials." - assert 'Unable to connect to' in str(ve), ve - - # Try and fail to connect supplying incorrect CHAP creds (not mutual) - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s: - TUR(s) - assert False, "Should not have been able to connect with incorrect CHAP credentials." - assert 'Unable to connect to' in str(ve), ve - - # Ensure we can connect with the right CHAP creds, if we *choose* not - # to validate things. - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s: - _verify_inquiry(s) - - # Try and fail to connect supplying incorrect Mutual CHAP creds - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, "WrongSecret") as s: - TUR(s) - assert False, "Should not have been able to connect with incorrect Mutual CHAP credentials." - assert 'Unable to connect to' in str(ve), ve - - # Finally ensure we can connect with the right Mutual CHAP creds - with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, peer_secret) as s: - _verify_inquiry(s) - - -def test_07_report_luns(request): - """ - This tests REPORT LUNS and accessing multiple LUNs on a target. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - iqn = f'{basename}:{target_name}' - with initiator(): - with portal() as portal_config: - portal_id = portal_config['id'] - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - # LUN 0 (100 MB file extent) - with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id): - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_luns(s, [0]) - verify_capacity(s, MB_100) - # Now create a 512 MB zvol and associate with LUN 1 - with zvol_dataset(zvol): - with zvol_extent(zvol) as extent_config: - extent_id = extent_config['id'] - with target_extent_associate(target_id, extent_id, 1): - # Connect to LUN 0 - with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s0: - verify_luns(s0, [0, 1]) - verify_capacity(s0, MB_100) - # Connect to LUN 1 - with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s1: - verify_luns(s1, [0, 1]) - verify_capacity(s1, MB_512) - # Check again now that LUN 1 has been removed again. - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - verify_luns(s, [0]) - verify_capacity(s, MB_100) - - -def target_test_snapshot_single_login(ip, iqn, dataset_id): - """ - This tests snapshots with an iSCSI target using a single - iSCSI session. - """ - zeros = bytearray(512) - deadbeef = bytearray.fromhex('deadbeef') * 128 - deadbeef_lbas = [1, 5, 7] - all_deadbeef_lbas = [1, 5, 7, 10, 11] - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # First let's write zeros to the first 12 blocks using WRITE SAME (16) - s.writesame16(0, 12, zeros) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - # Take snap0 - with snapshot(dataset_id, "snap0", get=True) as snap0_config: - - # Now let's write DEADBEEF to a few LBAs using WRITE (16) - for lba in deadbeef_lbas: - s.write16(lba, 1, deadbeef) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Take snap1 - with snapshot(dataset_id, "snap1", get=True) as snap1_config: - - # Do a WRITE for > 1 LBA - s.write16(10, 2, deadbeef * 2) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in all_deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Now revert to snap1 - snapshot_rollback(snap1_config['id']) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Now revert to snap0 - snapshot_rollback(snap0_config['id']) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - -def target_test_snapshot_multiple_login(ip, iqn, dataset_id): - """ - This tests snapshots with an iSCSI target using multiple - iSCSI sessions. 
- """ - zeros = bytearray(512) - deadbeef = bytearray.fromhex('deadbeef') * 128 - deadbeef_lbas = [1, 5, 7] - all_deadbeef_lbas = [1, 5, 7, 10, 11] - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # First let's write zeros to the first 12 blocks using WRITE SAME (16) - s.writesame16(0, 12, zeros) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - # Take snap0 - with snapshot(dataset_id, "snap0", get=True) as snap0_config: - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # Now let's write DEADBEEF to a few LBAs using WRITE (16) - for lba in deadbeef_lbas: - s.write16(lba, 1, deadbeef) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Take snap1 - with snapshot(dataset_id, "snap1", get=True) as snap1_config: - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # Do a WRITE for > 1 LBA - s.write16(10, 2, deadbeef * 2) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in all_deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Now revert to snap1 - snapshot_rollback(snap1_config['id']) - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - if lba in deadbeef_lbas: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - # Now revert to snap0 - snapshot_rollback(snap0_config['id']) - - with iscsi_scsi_connection(ip, iqn) as s: - TUR(s) - # Check results using READ (16) - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - -def test_08_snapshot_zvol_extent(request): - """ - This tests snapshots with a zvol extent based iSCSI target. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - iqn = f'{basename}:{target_name}' - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config: - target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset']) - with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config: - target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset']) - - -def test_09_snapshot_file_extent(request): - """ - This tests snapshots with a file extent based iSCSI target. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - iqn = f'{basename}:{target_name}' - with initiator_portal() as config: - with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config: - target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset']) - with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config: - target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset']) - - -def test_10_target_alias(request): - """ - This tests iSCSI target alias. - - At the moment SCST does not use the alias usefully (e.g. TargetAlias in - LOGIN response). When this is rectified this test should be extended. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - - data = {} - for t in ["A", "B"]: - data[t] = {} - data[t]['name'] = f"{target_name}{t.lower()}" - data[t]['alias'] = f"{target_name}{t}_alias" - data[t]['file'] = f"{target_name}{t}_file" - - A = data['A'] - B = data['B'] - with initiator_portal() as config: - with configured_target_to_file_extent(config, A['name'], pool_name, dataset_name, A['file'], A['alias']) as iscsi_config: - with target(B['name'], [{'portal': iscsi_config['portal']['id']}]) as targetB_config: - with file_extent(pool_name, dataset_name, B['file'], extent_name="extentB") as extentB_config: - with target_extent_associate(targetB_config['id'], extentB_config['id']): - # Created two targets, one with an alias, one without. Check them. - targets = get_targets() - assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias'] - assert targets[B['name']]['alias'] is None, targets[B['name']]['alias'] - - # Update alias for B - set_target_alias(targets[B['name']]['id'], B['alias']) - targets = get_targets() - assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias'] - assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias'] - - # Clear alias for A - set_target_alias(targets[A['name']]['id'], "") - targets = get_targets() - assert targets[A['name']]['alias'] is None, targets[A['name']]['alias'] - assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias'] - - # Clear alias for B - set_target_alias(targets[B['name']]['id'], "") - targets = get_targets() - assert targets[A['name']]['alias'] is None, targets[A['name']]['alias'] - assert targets[B['name']]['alias'] is None, targets[B['name']]['alias'] - - -def test_11_modify_portal(request): - """ - Test that we can modify a target portal. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - with portal() as portal_config: - assert portal_config['comment'] == 'Default portal', portal_config - # First just change the comment - payload = {'comment': 'New comment'} - call('iscsi.portal.update', portal_config['id'], payload) - new_config = call('iscsi.portal.get_instance', portal_config['id']) - assert new_config['comment'] == 'New comment', new_config - # Then try to reapply everything - payload = {'comment': 'test1', 'discovery_authmethod': 'NONE', 'discovery_authgroup': None, 'listen': [{'ip': '0.0.0.0'}]} - # payload = {'comment': 'test1', 'discovery_authmethod': 'NONE', 'discovery_authgroup': None, 'listen': [{'ip': '0.0.0.0'}, {'ip': '::'}]} - call('iscsi.portal.update', portal_config['id'], payload) - new_config = call('iscsi.portal.get_instance', portal_config['id']) - assert new_config['comment'] == 'test1', new_config - - -def test_12_pblocksize_setting(request): - """ - This tests whether toggling pblocksize has the desired result on READ CAPACITY 16, i.e. - whether setting it results in LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT being zero. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - iqn = f'{basename}:{target_name}' - with initiator_portal() as config: - with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config: - extent_config = iscsi_config['extent'] - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - TUR(s) - data = s.readcapacity16().result - # By default 512 << 3 == 4096 - assert data['lbppbe'] == 3, data - - # First let's just change the blocksize to 2K - payload = {'blocksize': 2048} - call('iscsi.extent.update', extent_config['id'], payload) - - expect_check_condition(s, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - - data = s.readcapacity16().result - assert data['block_length'] == 2048, data - assert data['lbppbe'] == 1, data - - # Now let's change it back to 512, but also set pblocksize - payload = {'blocksize': 512, 'pblocksize': True} - call('iscsi.extent.update', extent_config['id'], payload) - - expect_check_condition(s, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - - data = s.readcapacity16().result - assert data['block_length'] == 512, data - assert data['lbppbe'] == 0, data - - with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config: - extent_config = iscsi_config['extent'] - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - TUR(s) - data = s.readcapacity16().result - # We created a vol with volblocksize == 16K (512 << 5) - assert data['lbppbe'] == 5, data - - # First let's just change the blocksize to 4K - payload = {'blocksize': 4096} - call('iscsi.extent.update', extent_config['id'], payload) - - expect_check_condition(s, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - - data = s.readcapacity16().result - assert data['block_length'] == 4096, data - assert data['lbppbe'] == 2, data - - # Now let's also set pblocksize - payload = {'pblocksize': True} - call('iscsi.extent.update', extent_config['id'], payload) - - TUR(s) - data = s.readcapacity16().result - assert data['block_length'] == 4096, data - assert data['lbppbe'] == 0, data - - -def generate_name(length, base="target"): - result = f"{base}-{length}-" - remaining = length - len(result) - assert remaining >= 0, f"Function not suitable for such a short length: {length}" - return result + ''.join(random.choices(string.ascii_lowercase + string.digits, k=remaining)) - - -@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"]) -def test_13_test_target_name(request, extent_type): - """ - Test the user-supplied target name. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - - with initiator_portal() as config: - name64 = generate_name(64) - with configured_target(config, name64, extent_type): - iqn = f'{basename}:{name64}' - target_test_readwrite16(truenas_server.ip, iqn) - - name65 = generate_name(65) - with pytest.raises(ValidationErrors) as ve: - with configured_target(config, name65, extent_type): - assert False, f"Should not have been able to create a target with name length {len(name65)}." 
- assert ve.value.errors == [
- ValidationError('iscsi_extent_create.name', 'The value may not be longer than 64 characters', errno.EINVAL),
- ]
-
-
-@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
-def test_14_target_lun_extent_modify(request, extent_type):
- """
- Perform some tests of the iscsi.targetextent.update API, including
- trying to provide invalid combinations.
- """
- depends(request, ["iscsi_cmd_00"], scope="session")
-
- name1 = f'{target_name}1'
- name2 = f'{target_name}2'
- name3 = f'{target_name}3'
- name4 = f'{target_name}4'
-
- @contextlib.contextmanager
- def expect_lun_in_use_failure():
- with pytest.raises(ValidationErrors) as ve:
- yield
- assert False, "Should not be able to associate because LUN in use"
- assert "LUN ID is already being used for this target." in str(ve.value)
-
- @contextlib.contextmanager
- def expect_extent_in_use_failure():
- with pytest.raises(ValidationErrors) as ve:
- yield
- assert False, "Should not be able to associate because extent in use"
- assert "Extent is already in use" in str(ve.value)
-
- # The following will create the extents with the same name as the target.
- with initiator_portal() as config:
- with configured_target(config, name1, extent_type) as config1:
- with configured_target(config, name2, extent_type) as config2:
- with configured_target(config, name3, extent_type) as config3:
- # Create an extra extent to 'play' with
- with zvol_dataset(zvol):
- with zvol_extent(zvol, extent_name=name4) as config4:
- # First we will attempt some new, but invalid associations
-
- # LUN in use
- with expect_lun_in_use_failure():
- payload = {
- 'target': config1['target']['id'],
- 'lunid': 0,
- 'extent': config4['id']
- }
- call('iscsi.targetextent.create', payload)
-
- # extent in use
- with expect_extent_in_use_failure():
- payload = {
- 'target': config1['target']['id'],
- 'lunid': 1,
- 'extent': config2['extent']['id']
- }
- call('iscsi.targetextent.create', payload)
-
- # Now succeed in creating a new target/lun/extent association
- payload = {
- 'target': config1['target']['id'],
- 'lunid': 1,
- 'extent': config4['id']
- }
- call('iscsi.targetextent.create', payload)
-
- # Get the current config
- textents = call('iscsi.targetextent.query')
-
- # Now perform some updates that will not succeed
- textent4 = next(textent for textent in textents if textent['extent'] == config4['id'])
-
- # Attempt some invalid updates
- # LUN in use
- with expect_lun_in_use_failure():
- payload = {
- 'target': textent4['target'],
- 'lunid': 0,
- 'extent': textent4['extent']
- }
- call('iscsi.targetextent.update', textent4['id'], payload)
-
- # extent in use in another target
- with expect_extent_in_use_failure():
- payload = {
- 'target': textent4['target'],
- 'lunid': textent4['lunid'],
- 'extent': config3['extent']['id']
- }
- call('iscsi.targetextent.update', textent4['id'], payload)
-
- # extent in use in this target
- with expect_extent_in_use_failure():
- payload = {
- 'target': textent4['target'],
- 'lunid': textent4['lunid'],
- 'extent': config1['extent']['id']
- }
- call('iscsi.targetextent.update', textent4['id'], payload)
-
- # Move extent2's association to LUN 1
- textent2 = next(textent for textent in textents if textent['extent'] == config2['extent']['id'])
- payload = {
- 'target': textent2['target'],
- 'lunid': 1,
- 'extent': textent2['extent']
- }
- call('iscsi.targetextent.update', textent2['id'], payload)
-
- # Try to move it (to target1) just by changing the target, will clash
- with expect_lun_in_use_failure():
- payload = {
- 'target':
config1['target']['id'],
- 'lunid': 1,
- 'extent': textent2['extent']
- }
- call('iscsi.targetextent.update', textent2['id'], payload)
-
- # But can move it elsewhere (target3)
- payload = {
- 'target': config3['target']['id'],
- 'lunid': 1,
- 'extent': textent2['extent']
- }
- call('iscsi.targetextent.update', textent2['id'], payload)
-
- # Delete textent4 association
- call('iscsi.targetextent.delete', textent4['id'])
-
- # Now can do the move that previously failed
- payload = {
- 'target': config1['target']['id'],
- 'lunid': 1,
- 'extent': textent2['extent']
- }
- call('iscsi.targetextent.update', textent2['id'], payload)
-
- # Restore it
- payload = {
- 'target': config2['target']['id'],
- 'lunid': 0,
- 'extent': textent2['extent']
- }
- call('iscsi.targetextent.update', textent2['id'], payload)
-
-
-def _isns_wait_for_iqn(isns_client, iqn, timeout=10):
- iqns = set(isns_client.list_targets())
- while timeout > 0 and iqn not in iqns:
- sleep(1)
- timeout -= 1
- iqns = set(isns_client.list_targets())
- return iqns
-
-
-def test_15_test_isns(request):
- """
- Test ability to register targets with iSNS.
- """
- # Will use a more unique target name than usual, just in case several test
- # runs are hitting the same iSNS server at the same time.
- depends(request, ["iscsi_cmd_00"], scope="session")
- _host = socket.gethostname()
- _rand = ''.join(random.choices(string.digits + string.ascii_lowercase, k=12))
- _name_base = f'isnstest:{_host}:{_rand}'
- _target1 = f'{_name_base}:1'
- _target2 = f'{_name_base}:2'
- _initiator = f'iqn.2005-10.org.freenas.ctl:isnstest:{_name_base}:initiator'
- _iqn1 = f'{basename}:{_target1}'
- _iqn2 = f'{basename}:{_target2}'
-
- with isns_connection(isns_ip, _initiator) as isns_client:
- # First let's ensure that the targets are not already present.
- base_iqns = set(isns_client.list_targets())
- for iqn in [_iqn1, _iqn2]:
- assert iqn not in base_iqns, iqn
-
- # Create target1 and ensure it is still not present (because we
- # haven't switched on iSNS yet).
- with initiator_portal() as config:
- with configured_target_to_file_extent(config,
- _target1,
- pool_name,
- dataset_name,
- file_name) as iscsi_config:
- iqns = set(isns_client.list_targets())
- assert _iqn1 not in iqns, _iqn1
-
- # Now turn on the iSNS server
- with isns_enabled():
- iqns = _isns_wait_for_iqn(isns_client, _iqn1)
- assert _iqn1 in iqns, _iqn1
-
- # Create another target and ensure it shows up too
- with target(_target2,
- [{'portal': iscsi_config['portal']['id']}]
- ) as target2_config:
- target_id = target2_config['id']
- with zvol_dataset(zvol):
- with zvol_extent(zvol) as extent_config:
- extent_id = extent_config['id']
- with target_extent_associate(target_id, extent_id):
- iqns = _isns_wait_for_iqn(isns_client, _iqn2)
- for iqn in [_iqn1, _iqn2]:
- assert iqn in iqns, iqn
-
- # Now that iSNS is disabled again, ensure that our target is
- # no longer advertised
- iqns = set(isns_client.list_targets())
- assert _iqn1 not in iqns, _iqn1
-
- # Finally let's ensure that neither target is present.
- base_iqns = set(isns_client.list_targets()) - for iqn in [_iqn1, _iqn2]: - assert iqn not in base_iqns, iqn - - -class TestFixtureInitiatorName: - """Fixture for test_16_invalid_initiator_name""" - - iqn = f'{basename}:{target_name}' - - @pytest.fixture(scope='class') - def create_target(self): - with initiator_portal() as config: - with configured_target(config, target_name, "FILE"): - yield - - params = [ - (None, True), - ("iqn.1991-05.com.microsoft:fake-host", True), - ("iqn.1991-05.com.microsoft:fake-/-host", False), - ("iqn.1991-05.com.microsoft:fake-#-host", False), - ("iqn.1991-05.com.microsoft:fake-%s-host", False), - ("iqn.1991-05.com.microsoft:unicode-\u6d4b\u8bd5-ok", True), # 测试 - ("iqn.1991-05.com.microsoft:unicode-\u30c6\u30b9\u30c8-ok", True), # テスト - ("iqn.1991-05.com.microsoft:unicode-\u180E-bad", False), # Mongolian vowel separator - ("iqn.1991-05.com.microsoft:unicode-\u2009-bad", False), # Thin Space - ("iqn.1991-05.com.microsoft:unicode-\uFEFF-bad", False), # Zero width no-break space - ] - - @pytest.mark.parametrize("initiator_name, expected", params) - def test_16_invalid_initiator_name(self, request, create_target, initiator_name, expected): - """ - Deliberately send SCST some invalid initiator names and ensure it behaves OK. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - - if expected: - with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s: - _verify_inquiry(s) - else: - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s: - assert False, "Should not have been able to connect with invalid initiator name." - assert 'Unable to connect to' in str(ve), ve - - -def _pr_check_registered_keys(s, expected=[]): - opcodes = s.device.opcodes - data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_KEYS) - assert len(data.result['reservation_keys']) == len(expected), data.result - if len(expected): - expected_set = set(expected) - received_set = set(data.result['reservation_keys']) - assert expected_set == received_set, received_set - return data.result - - -def _pr_check_reservation(s, expected={'reservation_key': None, 'scope': None, 'type': None}): - opcodes = s.device.opcodes - data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_RESERVATION) - for key, value in expected.items(): - actual_value = data.result.get(key) - assert value == actual_value, data.result - return data.result - - -def _pr_register_key(s, value): - opcodes = s.device.opcodes - s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER, - service_action_reservation_key=value) - - -def _pr_unregister_key(s, value): - opcodes = s.device.opcodes - s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER, - reservation_key=value, - service_action_reservation_key=0) - - -def _pr_reserve(s, pr_type, scope=LU_SCOPE, **kwargs): - opcodes = s.device.opcodes - s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RESERVE, - scope=scope, - pr_type=pr_type, - **kwargs) - - -def _pr_release(s, pr_type, scope=LU_SCOPE, **kwargs): - opcodes = s.device.opcodes - s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RELEASE, - scope=scope, - pr_type=pr_type, - **kwargs) - - -@contextlib.contextmanager -def _pr_registration(s, key): - _pr_register_key(s, key) - try: - yield - finally: - _pr_unregister_key(s, key) - # There is room 
for improvement here wrt SPC-5 5.14.11.2.3, but not urgent as
- # we are hygienic wrt releasing reservations before unregistering keys
-
-
-@contextlib.contextmanager
-def _pr_reservation(s, pr_type, scope=LU_SCOPE, other_connections=[], **kwargs):
- assert s not in other_connections, "Invalid parameter mix"
- _pr_reserve(s, pr_type, scope, **kwargs)
- try:
- yield
- finally:
- _pr_release(s, pr_type, scope, **kwargs)
- # Do processing as specified by SPC-5 5.14.11.2.2 Releasing
- # For the time being we will ignore the NUAR bit from SPC-5 7.5.11 Control mode page
- if pr_type in [PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY,
- PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY,
- PR_TYPE.WRITE_EXCLUSIVE_ALL_REGISTRANTS,
- PR_TYPE.EXCLUSIVE_ACCESS_ALL_REGISTRANTS]:
- sleep(5)
- for s2 in other_connections:
- expect_check_condition(s2, sense_ascq_dict[0x2A04]) # "RESERVATIONS RELEASED"
-
-
-@skip_persistent_reservations
-@pytest.mark.dependency(name="iscsi_basic_persistent_reservation")
-def test_17_basic_persistent_reservation(request):
- depends(request, ["iscsi_cmd_00"], scope="session")
- with initiator_portal() as config:
- with configured_target_to_zvol_extent(config, target_name, zvol):
- iqn = f'{basename}:{target_name}'
- with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
- TUR(s)
-
- _pr_check_registered_keys(s, [])
- _pr_check_reservation(s)
-
- with _pr_registration(s, PR_KEY1):
- _pr_check_registered_keys(s, [PR_KEY1])
- _pr_check_reservation(s)
-
- with _pr_reservation(s, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1):
- _pr_check_registered_keys(s, [PR_KEY1])
- _pr_check_reservation(s, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-
- _pr_check_registered_keys(s, [PR_KEY1])
- _pr_check_reservation(s)
-
- _pr_check_registered_keys(s, [])
- _pr_check_reservation(s)
-
-
-@contextlib.contextmanager
-def _pr_expect_reservation_conflict(s):
- try:
- yield
- assert False, "Failed to get expected RESERVATION CONFLICT"
- except Exception as e:
- if e.__class__.__name__ != str(CheckType.RESERVATION_CONFLICT):
- raise e
-
-
-def _check_target_rw_paths(s1, s2):
- """
- Check that the two supplied paths can read/write data, and that they point at the same LUN.
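-
- A write through one session must be visible to reads through the other,
- e.g. (sketch): after s1.writesame16(0, 6, deadbeef), a s2.read16(0, 1)
- is expected to return the deadbeef pattern.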
- """ - zeros = bytearray(512) - deadbeef = bytearray.fromhex('deadbeef') * 128 - abba = bytearray.fromhex('abbaabba') * 128 - - # First let's write zeros to the first 12 blocks using WRITE SAME (16) - s1.writesame16(0, 12, zeros) - - # Check results using READ (16) - for s in (s1, s2): - for lba in range(0, 12): - r = s.read16(lba, 1) - assert r.datain == zeros, r.datain - - # Update some blocks from each initiator using WRITE SAME - s1.writesame16(0, 6, deadbeef) - s2.writesame16(6, 6, abba) - - # Check results using READ (16) - for s in (s1, s2): - for lba in range(0, 6): - r = s.read16(lba, 1) - assert r.datain == deadbeef, r.datain - for lba in range(6, 12): - r = s.read16(lba, 1) - assert r.datain == abba, r.datain - - -def _check_persistent_reservations(s1, s2): - # - # First just do a some basic tests (register key, reserve, release, unregister key) - # - _pr_check_registered_keys(s1, []) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, []) - _pr_check_reservation(s2) - - with _pr_registration(s1, PR_KEY1): - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s2) - - with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]): - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE}) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE}) - - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s2) - - with _pr_registration(s2, PR_KEY2): - _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s2) - - with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]): - _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE}) - _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE}) - - _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s2) - - with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY2, other_connections=[s1]): - _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s1, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY}) - _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s2, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY}) - - _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2]) - _pr_check_reservation(s2) - - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s2) - - _pr_check_registered_keys(s1, []) - _pr_check_reservation(s1) - _pr_check_registered_keys(s2, []) - _pr_check_reservation(s2) - - # - # Now let's fail some stuff - # See: - # - SPC-5 5.14 Table 66 - # - SBC-4 4.17 Table 13 - # - zeros = 
bytearray(512)
- dancing_queen = bytearray.fromhex('00abba00') * 128
- deadbeef = bytearray.fromhex('deadbeef') * 128
- with _pr_registration(s1, PR_KEY1):
- with _pr_registration(s2, PR_KEY2):
-
- # With registrations only, both initiators can write
- s1.write16(0, 1, deadbeef)
- s2.write16(1, 1, dancing_queen)
- r = s1.read16(1, 1)
- assert r.datain == dancing_queen, r.datain
- r = s2.read16(0, 1)
- assert r.datain == deadbeef, r.datain
-
- with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
- s1.writesame16(0, 2, zeros)
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
- with _pr_expect_reservation_conflict(s2):
- s2.write16(1, 1, dancing_queen)
-
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
- with _pr_expect_reservation_conflict(s2):
- with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY2):
- pass
-
- with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS, reservation_key=PR_KEY1, other_connections=[s2]):
- with _pr_expect_reservation_conflict(s2):
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
- with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1, other_connections=[s2]):
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
- # s2 is no longer registered
- with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
- with _pr_expect_reservation_conflict(s2):
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
- with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
- r = s2.read16(0, 2)
- assert r.datain == zeros + zeros, r.datain
-
-
-@skip_persistent_reservations
-@skip_multi_initiator
-def test_18_persistent_reservation_two_initiators(request):
- depends(request, ["iscsi_cmd_00"], scope="session")
- with initiator_portal() as config:
- with configured_target_to_zvol_extent(config, target_name, zvol):
- iqn = f'{basename}:{target_name}'
- with iscsi_scsi_connection(truenas_server.ip, iqn) as s1:
- TUR(s1)
- initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
- with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_name2) as s2:
- TUR(s2)
- _check_persistent_reservations(s1, s2)
-
-
-def _serial_number(s):
- x = s.inquiry(evpd=1, page_code=0x80)
- return x.result['unit_serial_number'].decode('utf-8')
-
-
-def _device_identification(s):
- result = {}
- x = s.inquiry(evpd=1, page_code=0x83)
- for desc in x.result['designator_descriptors']:
- if desc['designator_type'] == 4:
- result['relative_target_port_identifier'] = desc['designator']['relative_port']
- if desc['designator_type'] == 5:
- result['target_port_group'] = desc['designator']['target_portal_group']
- if desc['designator_type'] == 3 and desc['designator']['naa'] == 6:
- items = (desc['designator']['naa'],
- desc['designator']['ieee_company_id'],
- desc['designator']['vendor_specific_identifier'],
- desc['designator']['vendor_specific_identifier_extension']
- )
- result['naa'] = "0x{:01x}{:06x}{:09x}{:016x}".format(*items)
- return result
-
-
-def _verify_ha_inquiry(s, serial_number, naa, tpgs=0,
- vendor='TrueNAS', product_id='iSCSI Disk'):
- """
- Verify that the supplied SCSI connection has the expected INQUIRY response.
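-
- For example (sketch), after logging in to an ALUA target on one node:
-
- with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s:
- _verify_ha_inquiry(s, extent['serial'], extent['naa'], tpgs=1)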
- - :param s: a pyscsi.SCSI instance - """ - TUR(s) - inq = s.inquiry().result - assert inq['t10_vendor_identification'].decode('utf-8').startswith(vendor) - assert inq['product_identification'].decode('utf-8').startswith(product_id) - assert inq['tpgs'] == tpgs - assert serial_number == _serial_number(s) - assert naa == _device_identification(s)['naa'] - - -def _get_node(timeout=None): - return call('failover.node') - - -def _get_ha_failover_status(): - # Make sure we're talking to the master - return call('failover.status') - - -def _get_ha_remote_failover_status(): - return call('failover.call_remote', 'failover.status') - - -def _get_ha_failover_in_progress(): - # Make sure we're talking to the master - return call('failover.in_progress') - - -def _check_master(): - status = _get_ha_failover_status() - assert status == 'MASTER' - - -def _check_ha_node_configuration(): - both_nodes = ['A', 'B'] - # Let's perform some sanity checking wrt controller and IP address - # First get node and calculate othernode - node = _get_node() - assert node in both_nodes - _check_master() - - # Now let's get IPs and ensure that - # - Node A has truenas_server.nodea_ip - # - Node B has truenas_server.nodeb_ip - # We will need this later when we start checking TPG, etc - ips = {} - for anode in both_nodes: - ips[anode] = set() - if anode == node: - interfaces = call('interface.query') - else: - interfaces = call('failover.call_remote', 'interface.query') - - for i in interfaces: - for alias in i['state']['aliases']: - if alias.get('type') == 'INET': - ips[anode].add(alias['address']) - # Ensure that truenas_server.nodea_ip and truenas_server.nodeb_ip are what we expect - assert truenas_server.nodea_ip in ips['A'] - assert truenas_server.nodea_ip not in ips['B'] - assert truenas_server.nodeb_ip in ips['B'] - assert truenas_server.nodeb_ip not in ips['A'] - - -def _verify_ha_device_identification(s, naa, relative_target_port_identifier, target_port_group): - x = _device_identification(s) - assert x['naa'] == naa, x - assert x['relative_target_port_identifier'] == relative_target_port_identifier, x - assert x['target_port_group'] == target_port_group, x - - -def _verify_ha_report_target_port_groups(s, tpgs, active_tpg): - """ - Verify that the REPORT TARGET PORT GROUPS command returns the expected - results. - """ - x = s.reporttargetportgroups() - for tpg_desc in x.result['target_port_group_descriptors']: - tpg_id = tpg_desc['target_port_group'] - ids = set([x['relative_target_port_id'] for x in tpg_desc['target_ports']]) - assert ids == set(tpgs[tpg_id]), ids - # See SPC-5 6.36 REPORT TARGET PORT GROUPS - # Active/Optimized is 0 - # Active/Non-optimized is 1 - if tpg_id == active_tpg: - assert tpg_desc['asymmetric_access_state'] == 0, tpg_desc - else: - assert tpg_desc['asymmetric_access_state'] == 1, tpg_desc - - -def _get_active_target_portal_group(): - _check_master() - node = _get_node() - if node == 'A': - return CONTROLLER_A_TARGET_PORT_GROUP_ID - elif node == 'B': - return CONTROLLER_B_TARGET_PORT_GROUP_ID - return None - - -def _wait_for_alua_settle(retries=20): - print("Checking ALUA status...") - while retries: - if call('iscsi.alua.settled'): - print("ALUA is settled") - break - retries -= 1 - print("Waiting for ALUA to settle") - sleep(5) - - -def _ha_reboot_master(delay=900): - """ - Reboot the MASTER node and wait for both the new MASTER - and new BACKUP to become available. 
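-
- The wait proceeds in stages: (1) poll failover.node until the surviving
- node reports itself MASTER, (2) poll failover.status on the remote node
- until it reports BACKUP, (3) wait for failover.in_progress to clear, and
- (4) wait for ALUA to settle.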
- """ - get_node_timeout = 20 - orig_master_node = _get_node() - new_master_node = other_node(orig_master_node) - - call('system.reboot') - - # First we'll loop until the node is no longer the orig_node - new_master = False - while not new_master: - try: - # There are times when we don't get a response at all (albeit - # in a bhyte HA-VM pair), so add a timeout to catch this situation. - if _get_node(timeout=get_node_timeout) == new_master_node: - new_master = True - break - except requests.exceptions.Timeout: - delay = delay - get_node_timeout - except Exception: - delay = delay - 1 - if delay <= 0: - break - print("Waiting for MASTER") - sleep(1) - - if not new_master: - raise RuntimeError('Did not switch to new controller.') - - # OK, we're on the new master, now wait for the other controller - # to become BACKUP. - new_backup = False - while not new_backup: - try: - if _get_ha_remote_failover_status() == 'BACKUP': - new_backup = True - break - except Exception: - pass - delay = delay - 5 - if delay <= 0: - break - print("Waiting for BACKUP") - sleep(5) - - if not new_backup: - raise RuntimeError('Backup controller did not surface.') - - # Ensure that a failover is still not in progress - in_progress = True - while in_progress: - try: - in_progress = _get_ha_failover_in_progress() - if not in_progress: - break - except Exception: - pass - delay = delay - 5 - if delay <= 0: - break - print("Waiting while in progress") - sleep(5) - - if in_progress: - raise RuntimeError('Failover never completed.') - - # Finally check the ALUA status - _wait_for_alua_settle() - - -def _ensure_alua_state(state): - results = call('iscsi.global.config') - assert results['alua'] == state, results - - -@pytest.mark.dependency(name="iscsi_alua_config") -@pytest.mark.timeout(900) -def test_19_alua_config(request): - """ - Test various aspects of ALUA configuration. - - When run against a HA system this test will perform TWO reboots to - test failover wrt iSCSI ALUA targets. - - The second reboot was added to return the system to the original ACTIVE - node. This means that subsequent tests will run on the same node that - the previous tests started on, thereby simplifying log analysis. - """ - # First ensure ALUA is off - _ensure_alua_state(False) - - if ha: - _check_ha_node_configuration() - - # Next create a target - with initiator_portal() as config: - with configured_target_to_file_extent(config, - target_name, - pool_name, - dataset_name, - file_name - ) as iscsi_config: - # Login to the target and ensure that things look reasonable. - iqn = f'{basename}:{target_name}' - api_serial_number = iscsi_config['extent']['serial'] - api_naa = iscsi_config['extent']['naa'] - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - _verify_ha_inquiry(s, api_serial_number, api_naa) - - if ha: - # Only perform this section on a HA system - - with alua_enabled(): - _ensure_alua_state(True) - _wait_for_alua_settle() - - # We will login to the target on BOTH controllers and make sure - # we see the same target. 
Observe that we supply tpgs=1 as - # part of the check - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1: - _verify_ha_inquiry(s1, api_serial_number, api_naa, 1) - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2: - _verify_ha_inquiry(s2, api_serial_number, api_naa, 1) - - _verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID) - - tpgs = { - CONTROLLER_A_TARGET_PORT_GROUP_ID: [1], - CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001] - } - active_tpg = _get_active_target_portal_group() - _verify_ha_report_target_port_groups(s1, tpgs, active_tpg) - _verify_ha_report_target_port_groups(s2, tpgs, active_tpg) - - # Ensure ALUA is off again - _ensure_alua_state(False) - - # At this point we have no targets and ALUA is off - if ha: - # Now turn on ALUA again - with alua_enabled(): - _ensure_alua_state(True) - - # Then create a target (with ALUA already enabled) - with configured_target_to_file_extent(config, - target_name, - pool_name, - dataset_name, - file_name - ) as iscsi_config: - iqn = f'{basename}:{target_name}' - api_serial_number = iscsi_config['extent']['serial'] - api_naa = iscsi_config['extent']['naa'] - # Login to the target and ensure that things look reasonable. - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1: - _verify_ha_inquiry(s1, api_serial_number, api_naa, 1) - - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2: - _verify_ha_inquiry(s2, api_serial_number, api_naa, 1) - - _verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID) - - # Use the tpgs & active_tpg from above - _verify_ha_report_target_port_groups(s1, tpgs, active_tpg) - _verify_ha_report_target_port_groups(s2, tpgs, active_tpg) - - _check_target_rw_paths(s1, s2) - - # Let's failover - _ha_reboot_master() - expect_check_condition(s1, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - expect_check_condition(s2, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - - _check_ha_node_configuration() - new_active_tpg = _get_active_target_portal_group() - assert new_active_tpg != active_tpg - - _verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID) - - _verify_ha_report_target_port_groups(s1, tpgs, new_active_tpg) - _verify_ha_report_target_port_groups(s2, tpgs, new_active_tpg) - - _check_target_rw_paths(s1, s2) - - # Create a new target - with configured_target_to_zvol_extent(config, f'{target_name}b', zvol) as iscsi_config2: - iqn2 = f'{basename}:{target_name}b' - api_serial_number2 = iscsi_config2['extent']['serial'] - api_naa2 = iscsi_config2['extent']['naa'] - tpgs2 = { - CONTROLLER_A_TARGET_PORT_GROUP_ID: [1, 2], - CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001, 32002] - } - # Wait until ALUA settles, so that we know the target is available on the STANDBY node. 
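- # (The second target adds relative target port ids 2 and 32002, as
- # captured in tpgs2 above; iscsi.alua.settled is polled until the
- # STANDBY node has caught up.)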
- _wait_for_alua_settle() - # Login to the target on each controller - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as s3: - _verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1) - initiator_name3 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:third" - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2, initiator_name=initiator_name3) as s4: - _verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1) - _verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID) - _verify_ha_report_target_port_groups(s3, tpgs2, new_active_tpg) - _verify_ha_report_target_port_groups(s4, tpgs2, new_active_tpg) - _check_target_rw_paths(s3, s4) - - # Reboot again (to failback to the original ACTIVE node) - _ha_reboot_master() - for s in [s1, s2, s3, s4]: - expect_check_condition(s, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED" - - # After the 2nd reboot we will switch back to using the original active_tpg - - # Check the new target again - _verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1) - _verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1) - _verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID) - _verify_ha_report_target_port_groups(s3, tpgs2, active_tpg) - _verify_ha_report_target_port_groups(s4, tpgs2, active_tpg) - _check_target_rw_paths(s3, s4) - - # Check the original target - _verify_ha_inquiry(s1, api_serial_number, api_naa, 1) - _verify_ha_inquiry(s2, api_serial_number, api_naa, 1) - _verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID) - _verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID) - _verify_ha_report_target_port_groups(s1, tpgs2, active_tpg) - _verify_ha_report_target_port_groups(s2, tpgs2, active_tpg) - _check_target_rw_paths(s1, s2) - # Second target has been removed again - _wait_for_alua_settle() - _verify_ha_report_target_port_groups(s1, tpgs, active_tpg) - _verify_ha_report_target_port_groups(s2, tpgs, active_tpg) - - # Ensure ALUA is off again - _ensure_alua_state(False) - - -@skip_persistent_reservations -@skip_multi_initiator -@skip_ha_tests -def test_20_alua_basic_persistent_reservation(request): - # Don't need to specify "iscsi_cmd_00" here - depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session") - # Turn on ALUA - with alua_enabled(): - with initiator_portal() as config: - with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name): - iqn = f'{basename}:{target_name}' - # Login to the target on each controller - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1: - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2: - # Now we can do some basic tests - _pr_check_registered_keys(s1, []) - _pr_check_registered_keys(s2, []) - _pr_check_reservation(s1) - _pr_check_reservation(s2) - - with _pr_registration(s1, PR_KEY1): - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s1) - _pr_check_reservation(s2) - - with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]): - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': 
PR_TYPE.WRITE_EXCLUSIVE}) - _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE}) - - _pr_check_registered_keys(s1, [PR_KEY1]) - _pr_check_registered_keys(s2, [PR_KEY1]) - _pr_check_reservation(s1) - _pr_check_reservation(s2) - - _pr_check_registered_keys(s1, []) - _pr_check_registered_keys(s2, []) - _pr_check_reservation(s1) - _pr_check_reservation(s2) - - # Ensure ALUA is off again - _ensure_alua_state(False) - - -@skip_persistent_reservations -@skip_multi_initiator -@skip_ha_tests -def test_21_alua_persistent_reservation_two_initiators(request): - depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session") - with alua_enabled(): - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol): - iqn = f'{basename}:{target_name}' - # Login to the target on each controller - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1: - TUR(s1) - initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second" - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, initiator_name=initiator_name2) as s2: - TUR(s2) - _check_persistent_reservations(s1, s2) - # Do it all again, the other way around - _check_persistent_reservations(s2, s1) - - -def _get_designator(s, designator_type): - x = s.inquiry(evpd=1, page_code=0x83) - for designator in x.result["designator_descriptors"]: - if designator["designator_type"] == designator_type: - del designator["piv"] - return designator - - -def _xcopy_test(s1, s2, adds1=None, adds2=None): - zeros = bytearray(512) - deadbeef = bytearray.fromhex("deadbeef") * 128 - - def validate_blocks(s, start, end, beefy_list): - for lba in range(start, end): - r = s.read16(lba, 1) - if lba in beefy_list: - assert r.datain == deadbeef, r.datain - else: - assert r.datain == zeros, r.datain - - d1 = _get_designator(s1, 3) - d2 = _get_designator(s2, 3) - - # First let's write zeros to the first 20 blocks using WRITE SAME (16) - s1.writesame16(0, 20, zeros) - s2.writesame16(0, 20, zeros) - - # Write some deadbeef - s1.write16(1, 1, deadbeef) - s1.write16(3, 1, deadbeef) - s1.write16(4, 1, deadbeef) - - # Check that the blocks were written correctly - validate_blocks(s1, 0, 20, [1, 3, 4]) - validate_blocks(s2, 0, 20, []) - if adds1: - validate_blocks(adds1, 0, 20, [1, 3, 4]) - if adds2: - validate_blocks(adds2, 0, 20, []) - - # XCOPY - s1.extendedcopy4( - priority=1, - list_identifier=0x34, - target_descriptor_list=[ - { - "descriptor_type_code": "Identification descriptor target descriptor", - "peripheral_device_type": 0x00, - "target_descriptor_parameters": d1, - "device_type_specific_parameters": {"disk_block_length": 512}, - }, - { - "descriptor_type_code": "Identification descriptor target descriptor", - "peripheral_device_type": 0x00, - "target_descriptor_parameters": d2, - "device_type_specific_parameters": {"disk_block_length": 512}, - }, - ], - segment_descriptor_list=[ - { - "descriptor_type_code": "Copy from block device to block device", - "dc": 1, - "source_target_descriptor_id": 0, - "destination_target_descriptor_id": 1, - "block_device_number_of_blocks": 4, - "source_block_device_logical_block_address": 1, - "destination_block_device_logical_block_address": 10, - } - ], - ) - - validate_blocks(s1, 0, 20, [1, 3, 4]) - validate_blocks(s2, 0, 20, [10, 12, 13]) - if adds1: - validate_blocks(adds1, 0, 20, [1, 3, 4]) - if adds2: - validate_blocks(adds2, 0, 20, [10, 12, 13]) - - -@pytest.mark.parametrize('extent2', ["FILE", 
"VOLUME"]) -@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"]) -def test_22_extended_copy(request, extent1, extent2): - # print(f"Extended copy {extent1} -> {extent2}") - depends(request, ["iscsi_cmd_00"], scope="session") - - name1 = f"{target_name}x1" - name2 = f"{target_name}x2" - iqn1 = f'{basename}:{name1}' - iqn2 = f'{basename}:{name2}' - - with initiator_portal() as config: - with configured_target(config, name1, extent1): - with configured_target(config, name2, extent2): - with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1: - with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2: - s1.testunitready() - s2.testunitready() - _xcopy_test(s1, s2) - - -@skip_ha_tests -@pytest.mark.parametrize('extent2', ["FILE", "VOLUME"]) -@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"]) -def test_23_ha_extended_copy(request, extent1, extent2): - depends(request, ["iscsi_alua_config"], scope="session") - - name1 = f"{target_name}x1" - name2 = f"{target_name}x2" - iqn1 = f'{basename}:{name1}' - iqn2 = f'{basename}:{name2}' - - with alua_enabled(): - with initiator_portal() as config: - with configured_target(config, name1, extent1): - with configured_target(config, name2, extent2): - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn1) as sa1: - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as sa2: - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn1) as sb1: - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2) as sb2: - sa1.testunitready() - sa2.testunitready() - sb1.testunitready() - sb2.testunitready() - _xcopy_test(sa1, sa2, sb1, sb2) - # Now re-run the test using the other controller - _xcopy_test(sb1, sb2, sa1, sa2) - - -def test_24_iscsi_target_disk_login(request): - """ - Tests whether a logged in iSCSI target shows up in disks. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - iqn = f'{basename}:{target_name}' - - def fetch_disk_data(fetch_remote=False): - data = {} - if fetch_remote: - data['failover.get_disks_local'] = set(call('failover.call_remote', 'failover.get_disks_local')) - data['disk.get_unused'] = set([d['devname'] for d in call('failover.call_remote', 'disk.get_unused')]) - else: - data['failover.get_disks_local'] = set(call('failover.get_disks_local')) - data['disk.get_unused'] = set([d['devname'] for d in call('disk.get_unused')]) - return data - - def check_disk_data(old, new, whenstr, internode_check=False): - # There are some items that we can't compare between 2 HA nodes - SINGLE_NODE_COMPARE_ONLY = ['disk.get_unused'] - for key in old: - if internode_check and key in SINGLE_NODE_COMPARE_ONLY: - continue - assert old[key] == new[key], f"{key} does not match {whenstr}: {old[key]} {new[key]}" - - if ha: - # In HA we will create an ALUA target and check the STANDBY node - data_before_l = fetch_disk_data() - data_before_r = fetch_disk_data(True) - check_disk_data(data_before_l, data_before_r, "initially", True) - with alua_enabled(): - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol): - sleep(5) - data_after_l = fetch_disk_data() - data_after_r = fetch_disk_data(True) - check_disk_data(data_before_l, data_after_l, "after iSCSI ALUA target creation (Active)") - check_disk_data(data_before_r, data_after_r, "after iSCSI ALUA target creation (Standby)") - else: - # In non-HA we will create a target and login to it from the same TrueNAS system - # Just in case IP was supplied as a hostname use actual_ip - actual_ip = get_ip_addr(truenas_server.ip) - data_before = fetch_disk_data() - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol): - data_after = fetch_disk_data() - check_disk_data(data_before, data_after, "after iSCSI target creation") - - # Discover the target (loopback) - results = SSH_TEST(f"iscsiadm -m discovery -t st -p {actual_ip}", user, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - # Make SURE we find the target at the ip we expect - found_iqn = False - for line in results['stdout'].split('\n'): - if not line.startswith(f'{actual_ip}:'): - continue - if line.split()[1] == iqn: - found_iqn = True - assert found_iqn, f'Failed to find IQN {iqn}: out: {results["output"]}' - - # Login the target - results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --login", user, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - # Allow some time for the disk to surface - sleep(5) - # Then check that everything looks OK - try: - data_after = fetch_disk_data() - check_disk_data(data_before, data_after, "after iSCSI target login") - finally: - results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --logout", user, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - - -def test_25_resize_target_zvol(request): - """ - Verify that an iSCSI client is notified when the size of a ZVOL underlying - an iSCSI extent is modified. 
- """ - depends(request, ["iscsi_cmd_00"], scope="session") - - with initiator_portal() as config: - with configured_target_to_zvol_extent(config, target_name, zvol, volsize=MB_100) as config: - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - TUR(s) - assert MB_100 == read_capacity16(s) - # Have checked using tcpdump/wireshark that a SCSI Asynchronous Event Notification - # gets sent 0x2A09: "CAPACITY DATA HAS CHANGED" - zvol_resize(zvol, MB_256) - assert MB_256 == read_capacity16(s) - # But we can do better (in terms of test) ... turn AEN off, - # which means we will get a CHECK CONDITION on the next resize - SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password) - zvol_resize(zvol, MB_512) - expect_check_condition(s, sense_ascq_dict[0x2A09]) # "CAPACITY DATA HAS CHANGED" - assert MB_512 == read_capacity16(s) - # Try to shrink the ZVOL again. Expect an error - with pytest.raises(ValidationErrors): - zvol_resize(zvol, MB_256) - assert MB_512 == read_capacity16(s) - - -def test_26_resize_target_file(request): - """ - Verify that an iSCSI client is notified when the size of a file-based - iSCSI extent is modified. - """ - depends(request, ["iscsi_cmd_00"], scope="session") - - with initiator_portal() as config: - with configured_target_to_file_extent(config, - target_name, - pool_name, - dataset_name, - file_name, - filesize=MB_100) as config: - iqn = f'{basename}:{target_name}' - with iscsi_scsi_connection(truenas_server.ip, iqn) as s: - extent_id = config['extent']['id'] - TUR(s) - assert MB_100 == read_capacity16(s) - file_extent_resize(extent_id, MB_256) - assert MB_256 == read_capacity16(s) - # Turn AEN off so that we will get a CHECK CONDITION on the next resize - SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password) - file_extent_resize(extent_id, MB_512) - expect_check_condition(s, sense_ascq_dict[0x2A09]) # "CAPACITY DATA HAS CHANGED" - assert MB_512 == read_capacity16(s) - # Try to shrink the file again. Expect an error - with pytest.raises(ValidationErrors): - file_extent_resize(extent_id, MB_256) - assert MB_512 == read_capacity16(s) - - -@skip_multi_initiator -def test_27_initiator_group(request): - depends(request, ["iscsi_cmd_00"], scope="session") - - initiator_base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}" - initiator_iqn1 = f"{initiator_base}:one" - initiator_iqn2 = f"{initiator_base}:two" - initiator_iqn3 = f"{initiator_base}:three" - - # First create a target without an initiator group specified - with initiator_portal() as config1: - with configured_target_to_zvol_extent(config1, target_name, zvol) as config: - iqn = f'{basename}:{target_name}' - - # Ensure we can access from all initiators - for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]: - with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s: - TUR(s) - - # Now set the initiator id to the empty (Allow All Initiators) one - # that we created above. 
Then ensure we can still access the
- # target from all initiators
- set_target_initiator_id(config['target']['id'], config['initiator']['id'])
- for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
- with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- TUR(s)
-
- # Now create another initiator group, which contains the first two
- # initiators only, and modify the target to use it
- with initiator("two initiators only", [initiator_iqn1, initiator_iqn2]) as twoinit_config:
- set_target_initiator_id(config['target']['id'], twoinit_config['id'])
- # First two initiators can connect to the target
- for initiator_iqn in [initiator_iqn1, initiator_iqn2]:
- with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- TUR(s)
- # Third initiator cannot connect to the target
- with pytest.raises(RuntimeError) as ve:
- with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn3) as s:
- TUR(s)
- assert 'Unable to connect to' in str(ve), ve
- # Clear it again
- set_target_initiator_id(config['target']['id'], None)
-
- for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
- with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- TUR(s)
-
-
-def test_28_portal_access(request):
- """
- Verify that an iSCSI client can access a target on the specified
- portal.
-
- For a HA ALUA target, check the constituent interfaces.
- """
- iqn = f'{basename}:{target_name}'
- with initiator() as initiator_config:
- with portal(listen=[{'ip': get_ip_addr(truenas_server.ip)}]) as portal_config:
- config1 = {'initiator': initiator_config, 'portal': portal_config}
- with configured_target_to_zvol_extent(config1, target_name, zvol, volsize=MB_100):
- with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
- TUR(s)
- assert MB_100 == read_capacity16(s)
- # Now, if we are in a HA config turn on ALUA and test
- # the specific IP addresses
- if ha:
- with alua_enabled():
- _ensure_alua_state(True)
-
- with pytest.raises(RuntimeError) as ve:
- with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
- TUR(s)
- assert 'Unable to connect to' in str(ve), ve
-
- with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s:
- TUR(s)
- assert MB_100 == read_capacity16(s)
-
- with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s:
- TUR(s)
- assert MB_100 == read_capacity16(s)
-
-
-def test_29_multiple_extents():
- """
- Verify that an iSCSI client can access multiple target LUNs
- when multiple extents are configured.
-
- Also validate that an extent serial number cannot be reused, and
- that supplying an empty string serial number means one gets
- generated.
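-
- E.g. (sketch, mirroring the assertions below): creating a second extent
- with serial=extent1_config['serial'] fails validation, while serial=''
- succeeds and yields a generated 15-character serial number.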
- """ - iqn = f'{basename}:{target_name}' - with initiator_portal() as config: - portal_id = config['portal']['id'] - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as extent1_config: - with file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as extent2_config: - with target_extent_associate(target_id, extent1_config['id'], 0): - with target_extent_associate(target_id, extent2_config['id'], 1): - with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s: - TUR(s) - assert MB_100 == read_capacity16(s) - with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s: - TUR(s) - assert MB_256 == read_capacity16(s) - - # Now try to create another extent using the same serial number - # We expect this to fail. - with pytest.raises(ValidationErrors) as ve: - with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512, - extent_name="extent3", serial=extent1_config['serial']): - pass - assert ve.value.errors == [ - ValidationError('iscsi_extent_create.serial', 'Serial number must be unique', errno.EINVAL) - ] - - with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512, - extent_name="extent3", serial='') as extent3_config: - # We expect this to complete, but generate a serial number - assert len(extent3_config['serial']) == 15, extent3_config['serial'] - - -def check_inq_enabled_state(iqn, expected): - """Check the current enabled state of the specified SCST IQN directly from /sys - is as expected.""" - results = SSH_TEST(f"cat /sys/kernel/scst_tgt/targets/iscsi/{iqn}/enabled", user, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - for line in results["output"].split('\n'): - if line.startswith('Warning: Permanently added'): - continue - if line: - actual = int(line) - assert actual == expected, f'IQN {iqn} has an unexpected enabled state - was {actual}, expected {expected}' - - -def test_30_target_without_active_extent(request): - """Validate that a target will not be enabled if it does not have - and enabled associated extents""" - depends(request, ["iscsi_cmd_00"], scope="session") - - name1 = f"{target_name}x1" - name2 = f"{target_name}x2" - iqn1 = f'{basename}:{name1}' - iqn2 = f'{basename}:{name2}' - - with initiator_portal() as config: - with configured_target(config, name1, 'VOLUME') as target1_config: - with configured_target(config, name2, 'VOLUME') as target2_config: - # OK, we've configured two separate targets, ensure all looks good - check_inq_enabled_state(iqn1, 1) - check_inq_enabled_state(iqn2, 1) - with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1: - TUR(s1) - with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2: - TUR(s2) - - # Disable an extent and ensure things are as expected - extent_disable(target2_config['extent']['id']) - check_inq_enabled_state(iqn1, 1) - check_inq_enabled_state(iqn2, 0) - with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1: - TUR(s1) - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2: - TUR(s2) - assert 'Unable to connect to' in str(ve), ve - - # Reenable the extent - extent_enable(target2_config['extent']['id']) - check_inq_enabled_state(iqn1, 1) - check_inq_enabled_state(iqn2, 1) - with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1: - TUR(s1) - with 
iscsi_scsi_connection(truenas_server.ip, iqn2) as s2: - TUR(s2) - - # Move the extent from target2 to target1 - # - # We do this by updating the existing association rather - # than deleting the old association and creating a new one, - # because we want to avoid breakage w.r.t. yield ... finally cleanup - payload = { - 'target': target1_config['target']['id'], - 'lunid': 1, - 'extent': target2_config['extent']['id'] - } - call('iscsi.targetextent.update', target2_config['associate']['id'], payload) - - check_inq_enabled_state(iqn1, 1) - check_inq_enabled_state(iqn2, 0) - with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1: - TUR(s1) - # We should now have a LUN 1 - with iscsi_scsi_connection(truenas_server.ip, iqn1, 1) as s1b: - TUR(s1b) - with pytest.raises(RuntimeError) as ve: - with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2: - TUR(s2) - assert 'Unable to connect to' in str(ve), ve - - -def test_31_iscsi_sessions(request): - """Validate that we can get a list of currently running iSCSI sessions.""" - depends(request, ["iscsi_cmd_00"], scope="session") - - name1 = f"{target_name}x1" - name2 = f"{target_name}x2" - name3 = f"{target_name}x3" - iqn1 = f'{basename}:{name1}' - iqn2 = f'{basename}:{name2}' - iqn3 = f'{basename}:{name3}' - initiator_base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}" - initiator_iqn1 = f"{initiator_base}:one" - initiator_iqn2 = f"{initiator_base}:two" - initiator_iqn3 = f"{initiator_base}:three" - - with initiator_portal() as config: - with configured_target(config, name1, 'VOLUME'): - with configured_target(config, name2, 'FILE'): - with configured_target(config, name3, 'VOLUME'): - verify_client_count(0) - with iscsi_scsi_connection(truenas_server.ip, iqn1, initiator_name=initiator_iqn1): - verify_client_count(1) - with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn2): - # Client count checks the number of different IPs attached, not sessions - verify_client_count(1) - # Validate that the two sessions are reported correctly - data = get_iscsi_sessions(check_length=2) - for sess in data: - if sess['target'] == iqn1: - assert sess['initiator'] == initiator_iqn1, data - elif sess['target'] == iqn2: - assert sess['initiator'] == initiator_iqn2, data - else: - # Unknown target!
- assert False, data - # Filter by target - data = get_iscsi_sessions([['target', '=', iqn1]], 1) - assert data[0]['initiator'] == initiator_iqn1, data - data = get_iscsi_sessions([['target', '=', iqn2]], 1) - assert data[0]['initiator'] == initiator_iqn2, data - data = get_iscsi_sessions([['target', '=', iqn3]], 0) - # Filter by initiator - data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 1) - assert data[0]['target'] == iqn1, data - data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1) - assert data[0]['target'] == iqn2, data - data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0) - # Now login to target2 with initiator1 - with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn1): - verify_client_count(1) - get_iscsi_sessions(check_length=3) - # Filter by target - data = get_iscsi_sessions([['target', '=', iqn1]], 1) - assert data[0]['initiator'] == initiator_iqn1, data - data = get_iscsi_sessions([['target', '=', iqn2]], 2) - assert set([sess['initiator'] for sess in data]) == {initiator_iqn1, initiator_iqn2}, data - data = get_iscsi_sessions([['target', '=', iqn3]], 0) - # Filter by initiator - data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 2) - assert set([sess['target'] for sess in data]) == {iqn1, iqn2}, data - data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1) - assert data[0]['target'] == iqn2, data - data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0) - # Logout of target, ensure sessions get updated. - verify_client_count(1) - data = get_iscsi_sessions(check_length=2) - for sess in data: - if sess['target'] == iqn1: - assert sess['initiator'] == initiator_iqn1, data - elif sess['target'] == iqn2: - assert sess['initiator'] == initiator_iqn2, data - else: - # Unknown target! - assert False, data - # Client count checks the number of different IPs attached, not sessions - verify_client_count(1) - get_iscsi_sessions(check_length=1) - verify_client_count(0) - get_iscsi_sessions(check_length=0) - - -def test_32_multi_lun_targets(request): - """Validate that we can create and access multi-LUN targets.""" - depends(request, ["iscsi_cmd_00"], scope="session") - - name1 = f"{target_name}x1" - name2 = f"{target_name}x2" - iqn1 = f'{basename}:{name1}' - iqn2 = f'{basename}:{name2}' - - def test_target_sizes(ipaddr): - with iscsi_scsi_connection(ipaddr, iqn1, 0) as s: - verify_capacity(s, MB_100) - with iscsi_scsi_connection(ipaddr, iqn1, 1) as s: - verify_capacity(s, MB_200) - with iscsi_scsi_connection(ipaddr, iqn2, 0) as s: - verify_capacity(s, MB_256) - with iscsi_scsi_connection(ipaddr, iqn2, 1) as s: - verify_capacity(s, MB_512) - - with initiator_portal() as config: - with configured_target(config, name1, 'FILE', extent_size=MB_100) as config1: - with add_file_extent_target_lun(config1, 1, MB_200): - with configured_target(config, name2, 'VOLUME', extent_size=MB_256) as config1: - with add_zvol_extent_target_lun(config1, 1, volsize=MB_512): - # Check that we can connect to each LUN and that it has the expected capacity - test_target_sizes(truenas_server.ip) - if ha: - # Only perform this section on a HA system - with alua_enabled(): - test_target_sizes(truenas_server.nodea_ip) - test_target_sizes(truenas_server.nodeb_ip) - - -def test_33_no_lun_zero(): - """ - Verify that an iSCSI client can login to a target that is missing LUN 0 (and LUN 1) - and that report LUNs works as expected. 
- """ - iqn = f'{basename}:{target_name}' - with initiator_portal() as config: - portal_id = config['portal']['id'] - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - with dataset(dataset_name): - with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as extent1_config: - with file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as extent2_config: - with target_extent_associate(target_id, extent1_config['id'], 100): - with target_extent_associate(target_id, extent2_config['id'], 101): - # libiscsi sends a TUR to the lun on connect, so cannot properly test using it. - # Let's actually login and check that the expected LUNs surface. - assert target_login_test(get_ip_addr(truenas_server.ip), iqn, {100, 101}) - - # With libiscsi we can also check that the expected LUNs are there - with iscsi_scsi_connection(truenas_server.ip, iqn, 100) as s: - verify_luns(s, [100, 101]) - - -def test_34_zvol_extent_volthreading(): - """ - Ensure that volthreading is on for regular zvols and off when they are being - used an iSCSI extent. - """ - zvol_name = f"zvol_volthreading_test{digit}" - zvol = f'{pool_name}/{zvol_name}' - with zvol_dataset(zvol, MB_100, True, True): - assert get_volthreading(zvol) == 'on' - with zvol_extent(zvol, extent_name='zvolextent1'): - assert get_volthreading(zvol) == 'off' - assert get_volthreading(zvol) == 'on' - - -@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"]) -def test_35_delete_extent_no_dataset(extent_type): - """ - Verify that even if a dataset that contains an extent has been deleted from - the command line, can still use the webui/API to delete the extent. - """ - dataset_name = f'iscsids_{extent_type}_{digit}' - with dataset(dataset_name) as dspath: - DESTROY_CMD = f'zfs destroy -r {dspath}' - match extent_type: - case 'FILE': - with file_extent(pool_name, dataset_name, 'testfile', extent_name='fileextent1'): - ssh(DESTROY_CMD) - case 'VOLUME': - zvol = f'{dspath}/zvol{digit}' - with zvol_dataset(zvol, MB_100, True, True): - with zvol_extent(zvol, extent_name='zvolextent1'): - ssh(DESTROY_CMD) - - -def test_99_teardown(request): - # Disable iSCSI service - depends(request, ["iscsi_cmd_00"]) - payload = {'enable': False} - call('service.update', 'iscsitarget', payload) - # Stop iSCSI service. 
- call('service.stop', 'iscsitarget') - sleep(1) - # Verify stopped - service = _get_service() - assert service['state'] == "STOPPED", service diff --git a/tests/api2/test_262_iscsi_alua.py b/tests/api2/test_262_iscsi_alua.py deleted file mode 100644 index 1175ea5286471..0000000000000 --- a/tests/api2/test_262_iscsi_alua.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env python3 -# -# test_261_iscsi_cmd contains some general ALUA tests, but this file will contain some -# more detailed ALUA tests -import contextlib -import random -import string -from time import sleep - -import pytest -from assets.websocket.iscsi import (alua_enabled, initiator_portal, target, - target_extent_associate, verify_capacity, - verify_luns) -from assets.websocket.service import ensure_service_enabled -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.client import truenas_server - -from auto_config import ha, pool_name -from protocols import iscsi_scsi_connection - -pytestmark = pytest.mark.skipif(not ha, reason='Tests applicable to HA only') - -SERVICE_NAME = 'iscsitarget' -MB = 1024 * 1024 - - -@contextlib.contextmanager -def zvol(name, volsizeMB): - payload = { - 'name': f'{pool_name}/{name}', - 'type': 'VOLUME', - 'volsize': volsizeMB * MB, - 'volblocksize': '16K' - } - config = call('pool.dataset.create', payload) - try: - yield config - finally: - call('pool.dataset.delete', config['id']) - - -@contextlib.contextmanager -def zvol_extent(zvol, extent_name): - payload = { - 'type': 'DISK', - 'disk': f'zvol/{zvol}', - 'name': extent_name, - } - config = call('iscsi.extent.create', payload) - try: - yield config - finally: - call('iscsi.extent.delete', config['id'], True, True) - - -class TestFixtureConfiguredALUA: - """Fixture with iSCSI enabled and ALUA configured""" - - def wait_for_settle(self, verbose=False): - if verbose: - print("Checking ALUA status...") - retries = 12 - while retries: - if call('iscsi.alua.settled'): - if verbose: - print("ALUA is settled") - break - retries -= 1 - if verbose: - print("Waiting for ALUA to settle") - sleep(5) - - @pytest.fixture(scope='class') - def alua_configured(self): - with ensure_service_enabled(SERVICE_NAME): - call('service.start', SERVICE_NAME) - with alua_enabled(): - self.wait_for_settle() - with initiator_portal() as config: - yield config - - @contextlib.contextmanager - def target_lun(self, target_id, zvol_name, mb, lun): - with zvol(zvol_name, mb) as zvol_config: - with zvol_extent(zvol_config['id'], zvol_name) as extent_config: - with target_extent_associate(target_id, extent_config['id'], lun): - yield - - def verify_luns(self, iqn, lun_size_list): - lun_list = [lun for lun, _ in lun_size_list] - for lun, mb in lun_size_list: - # Node A - with iscsi_scsi_connection(truenas_server.nodea_ip, iqn, lun) as s: - verify_luns(s, lun_list) - verify_capacity(s, mb * MB) - # Node B - with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, lun) as s: - verify_luns(s, lun_list) - verify_capacity(s, mb * MB) - - def test_alua_luns(self, alua_configured): - """Test whether an ALUA target reacts correctly to having a LUN added - and removed again (in terms of REPORT LUNS response)""" - config = alua_configured - portal_id = config['portal']['id'] - digits = ''.join(random.choices(string.digits, k=4)) - target_name = f"target{digits}" - iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}' - with target(target_name, [{'portal': portal_id}]) as target_config: - target_id = target_config['id'] - # First configure a single
extent at LUN 0 and ensure that we - # can see it from both interfaces. - with self.target_lun(target_id, f'extent0_{digits}', 100, 0): - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100)]) - - # Next add a 2nd extent at LUN 1 and ensure that we can see both LUNs - # from both interfaces. - with self.target_lun(target_id, f'extent1_{digits}', 200, 1): - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100), (1, 200)]) - - # After the LUN 1 extent has been removed again, ensure that we cannot see it - # any longer. - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100)]) - - # Next add back a 2nd extent at LUN 1 (with a different size) and ensure - # that we can still see both LUNs from both interfaces. - with self.target_lun(target_id, f'extent1_{digits}', 250, 1): - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100), (1, 250)]) - # Add a third LUN - with self.target_lun(target_id, f'extent2_{digits}', 300, 2): - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100), (1, 250), (2, 300)]) - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100), (1, 250)]) - sleep(2) - self.wait_for_settle() - self.verify_luns(iqn, [(0, 100)]) diff --git a/tests/api2/test_275_ldap.py b/tests/api2/test_275_ldap.py deleted file mode 100644 index 16ca75add2468..0000000000000 --- a/tests/api2/test_275_ldap.py +++ /dev/null @@ -1,77 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.directory_service import ldap, LDAPUSER, LDAPPASSWORD -from middlewared.test.integration.assets.privilege import privilege -from middlewared.test.integration.assets.product import product_type -from middlewared.test.integration.utils import call, client - -pytestmark = [ - pytest.mark.skipif(not LDAPUSER, reason='Missing LDAP configuration'), -] - - -@pytest.fixture(scope="module") -def do_ldap_connection(request): - with ldap() as ldap_conn: - with product_type(): - yield ldap_conn - - -def test_ldap_initial_state(): - ds = call("directoryservices.status") - assert ds["type"] is None - assert ds["status"] is None - - ldap_config = call("ldap.config") - assert not ldap_config["enable"] - - -def test_ldap_schema_choices(): - expected = {"RFC2307", "RFC2307BIS"} - - choices = call("ldap.schema_choices") - assert set(choices) == expected - - -def test_get_ldap_ssl_choices(): - expected = {"OFF", "ON", "START_TLS"} - - choices = call("ldap.ssl_choices") - assert set(choices) == expected - - -def test_ldap_connection(do_ldap_connection): - ds = call("directoryservices.status") - assert ds["type"] == "LDAP" - assert ds["status"] == "HEALTHY" - - ldap_config = call("ldap.config") - assert ldap_config["enable"] - assert ldap_config["server_type"] == "OPENLDAP" - - -def test_ldap_user_group_cache(do_ldap_connection): - assert call("user.query", [["local", "=", False]], {'count': True}) != 0 - assert call("group.query", [["local", "=", False]], {'count': True}) != 0 - - -def test_account_privilege_authentication(do_ldap_connection): - - call("system.general.update", {"ds_auth": True}) - try: - group = call("user.get_user_obj", {"username": LDAPUSER}) - assert group["source"] == "LDAP" - with privilege({ - "name": "LDAP privilege", - "local_groups": [], - "ds_groups": [group["pw_gid"]], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, - }): - with client(auth=(LDAPUSER, LDAPPASSWORD)) as c: - methods = c.call("core.get_methods") - - assert "system.info" in methods - assert "pool.create" not in methods - 
finally: - call("system.general.update", {"ds_auth": False}) diff --git a/tests/api2/test_278_freeipa.py b/tests/api2/test_278_freeipa.py deleted file mode 100644 index cce72757e5f62..0000000000000 --- a/tests/api2/test_278_freeipa.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import SSH_TEST -from middlewared.test.integration.assets.directory_service import ldap -from middlewared.test.integration.utils import call -from auto_config import ha, user, password - -try: - from config import ( - FREEIPA_IP, - FREEIPA_BASEDN, - FREEIPA_BINDDN, - FREEIPA_BINDPW, - FREEIPA_HOSTNAME, - ) -except ImportError: - Reason = 'FREEIPA* variables are not set up in config.py' - pytestmark = pytest.mark.skipif(True, reason=Reason) - - -@pytest.fixture(scope="module") -def do_freeipa_connection(): - # Confirm DNS forward - res = SSH_TEST(f"host {FREEIPA_HOSTNAME}", user, password) - assert res['result'] is True, res - # stdout: "<hostname> has address <ip_address>" - assert res['stdout'].split()[-1] == FREEIPA_IP - - # DNS reverse - res = SSH_TEST(f"host {FREEIPA_IP}", user, password) - assert res['result'] is True, res - # stdout: "<reversed_ip>.in-addr.arpa domain name pointer <hostname>." - assert res['stdout'].split()[-1] == FREEIPA_HOSTNAME + "." - - with ldap( - FREEIPA_BASEDN, - FREEIPA_BINDDN, - FREEIPA_BINDPW, - FREEIPA_HOSTNAME, - validate_certificates=False, - ) as ldap_conn: - yield ldap_conn - - - # Validate that our LDAP configuration alert goes away when it's disabled. - alerts = [alert['klass'] for alert in call('alert.list')] - - # There's a one-shot alert that gets fired if we are an IPA domain - # connected via legacy mechanism. - assert 'IPALegacyConfiguration' not in alerts - - -def test_setup_and_enabling_freeipa(do_freeipa_connection): - # We are intentionally using an expired password in order to force - # a legacy-style LDAP bind. We need this support to not break - # existing FreeIPA users on update. This should be reworked in FT. - - ds = call('directoryservices.status') - assert ds['type'] == 'LDAP' - assert ds['status'] == 'HEALTHY' - - alerts = [alert['klass'] for alert in call('alert.list')] - - # There's a one-shot alert that gets fired if we are an IPA domain - # connected via legacy mechanism. - assert 'IPALegacyConfiguration' in alerts - - -def test_verify_config(request): - ldap_config = call('ldap.config') - assert 'RFC2307BIS' == ldap_config['schema'] - assert ldap_config['search_bases']['base_user'] == 'cn=users,cn=accounts,dc=tn,dc=ixsystems,dc=net' - assert ldap_config['search_bases']['base_group'] == 'cn=groups,cn=accounts,dc=tn,dc=ixsystems,dc=net' - assert ldap_config['search_bases']['base_netgroup'] == 'cn=ng,cn=compat,dc=tn,dc=ixsystems,dc=net' - assert ldap_config['server_type'] == 'FREEIPA' - - -def test_verify_that_the_freeipa_user_id_exist_on_the_nas(do_freeipa_connection): - """ - get_user_obj is a wrapper around the pwd module.
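- - For illustration, the call below is expected to return a pwd-style dict of - roughly this shape (sketch; subset only, values mirror the asserts that follow): - call('user.get_user_obj', {'username': 'ixauto_restricted', 'get_groups': True}) - # -> {'pw_uid': 925000003, 'pw_gid': 925000003, 'grouplist': [...], ...}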
- """ - pwd_obj = call('user.get_user_obj', {'username': 'ixauto_restricted', 'get_groups': True}) - - assert pwd_obj['pw_uid'] == 925000003 - assert pwd_obj['pw_gid'] == 925000003 - assert len(pwd_obj['grouplist']) >= 1, pwd_obj['grouplist'] - - -def test_10_verify_support_for_netgroups(do_freeipa_connection): - """ - 'getent netgroup' should be able to retrieve netgroup - """ - res = SSH_TEST("getent netgroup ixtestusers", user, password) - assert res['result'] is True, f"Failed to find netgroup 'ixgroup', returncode={res['returncode']}" - - # Confirm expected set of users or hosts - ixgroup = res['stdout'].split()[1:] - - # Confirm number of entries and some elements - assert len(ixgroup) == 3, ixgroup - assert any("testuser1" in sub for sub in ixgroup), ixgroup diff --git a/tests/api2/test_290_mail.py b/tests/api2/test_290_mail.py deleted file mode 100644 index a39de39c7f84c..0000000000000 --- a/tests/api2/test_290_mail.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import pytest -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import PUT, GET - - -def test_01_Configuring_settings(): - payload = {"fromemail": "william.spam@ixsystems.com", - "outgoingserver": "mail.ixsystems.com", - "pass": "changeme", - "port": 25, - "security": "PLAIN", - "smtp": True, - "user": "william.spam@ixsystems.com"} - results = PUT("/mail/", payload) - assert results.status_code == 200, results.text - - -def test_02_look_fromemail_settings_change(): - results = GET("/mail/") - assert results.json()["fromemail"] == "william.spam@ixsystems.com" - - -def test_03_look_outgoingserver_settings_change(): - results = GET("/mail/") - assert results.json()["outgoingserver"] == "mail.ixsystems.com" - - -def test_04_look_pass_settings_change(): - results = GET("/mail/") - assert results.json()["pass"] == "changeme" - - -def test_05_look_port_settings_change(): - results = GET("/mail/") - assert results.json()["port"] == 25 - - -def test_06_look_security_settings_change(): - results = GET("/mail/") - assert results.json()["security"] == "PLAIN" - - -def test_07_look_smtp_settings_change(): - results = GET("/mail/") - assert results.json()["smtp"] is True - - -def test_08_look_user_settings_change(): - results = GET("/mail/") - assert results.json()["user"] == "william.spam@ixsystems.com" diff --git a/tests/api2/test_300_nfs.py b/tests/api2/test_300_nfs.py deleted file mode 100644 index 50f0dfcaca54b..0000000000000 --- a/tests/api2/test_300_nfs.py +++ /dev/null @@ -1,1789 +0,0 @@ -import contextlib -import ipaddress -import os -import urllib.parse -from copy import copy -from time import sleep - -import pytest - -from middlewared.service_exception import ValidationError, ValidationErrors -from middlewared.test.integration.assets.account import group as create_group -from middlewared.test.integration.assets.account import user as create_user -from middlewared.test.integration.assets.filesystem import directory -from middlewared.test.integration.utils import call, mock, ssh -from middlewared.test.integration.utils.client import truenas_server -from middlewared.test.integration.utils.system import reset_systemd_svcs as reset_svcs - -from auto_config import hostname, password, pool_name, user -from functions import DELETE, GET, POST, PUT, SSH_TEST, wait_on_job -from protocols import SSH_NFS, nfs_share - -MOUNTPOINT = f"/tmp/nfs-{hostname}" -dataset = f"{pool_name}/nfs" -dataset_url = dataset.replace('/', '%2F') -NFS_PATH = 
"/mnt/" + dataset - -# Supported configuration files -conf_file = { - "nfs": { - "pname": "/etc/nfs.conf.d/local.conf", - "sections": { - 'nfsd': {}, - 'exportd': {}, - 'nfsdcld': {}, - 'nfsdcltrack': {}, - 'mountd': {}, - 'statd': {}, - 'lockd': {}} - }, - "idmapd": { - "pname": "/etc/idmapd.conf", - "sections": {"General": {}, "Mapping": {}, "Translation": {}} - } -} - - -def parse_exports(): - results = SSH_TEST("cat /etc/exports", user, password) - assert results['result'] is True, f"rc={results['returncode']}, {results['output']}, {results['stderr']}" - exp = results['stdout'].splitlines() - rv = [] - for idx, line in enumerate(exp): - if not line or line.startswith('\t'): - continue - - entry = {"path": line.strip()[1:-2], "opts": []} - - i = idx + 1 - while i < len(exp): - if not exp[i].startswith('\t'): - break - - e = exp[i].strip() - host, params = e.split('(', 1) - entry['opts'].append({ - "host": host, - "parameters": params[:-1].split(",") - }) - i += 1 - - rv.append(entry) - - return rv - - -def parse_server_config(conf_type="nfs"): - ''' - Parse known 'ini' style conf files. See definition of conf_file above. - - Debian will read to /etc/default/nfs-common and then /etc/nfs.conf - All TrueNAS NFS settings are in /etc/nfs.conf.d/local.conf as overrides - ''' - assert conf_type in conf_file.keys(), f"{conf_type} is not a supported conf type" - pathname = conf_file[conf_type]['pname'] - rv = conf_file[conf_type]['sections'] - expected_sections = rv.keys() - - # Read the file and parse it - res = ssh(f"cat {pathname}") - conf = res.splitlines() - section = '' - - for line in conf: - if not line or line.startswith("#"): - continue - if line.startswith("["): - section = line.split('[')[1].split(']')[0] - assert section in expected_sections, f"Unexpected section found: {section}" - continue - - k, v = line.split(" = ", 1) - rv[section].update({k: v}) - - return rv - - -def parse_rpcbind_config(): - ''' - In Debian 12 (Bookwork) rpcbind uses /etc/default/rpcbind. - Look for /etc/rpcbind.conf in future releases. 
- ''' - results = SSH_TEST("cat /etc/default/rpcbind", user, password) - assert results['result'] is True, f"rc={results['returncode']}, {results['output']}, {results['stderr']}" - conf = results['stdout'].splitlines() - rv = {} - - # With bindip the line of interest looks like: OPTIONS=-w -h 192.168.40.156 - for line in conf: - if not line or line.startswith("#"): - continue - if line.startswith("OPTIONS"): - opts = line.split('=')[1].split() - # '-w' is hard-wired, let's confirm that - assert len(opts) > 0 - assert '-w' == opts[0] - rv['-w'] = '' - # If there are more opts, they must be the bindip settings - if len(opts) == 3: - rv[opts[1]] = opts[2] - - return rv - - -def get_nfs_service_state(): - nfs_service = call('service.query', [['service', '=', 'nfs']], {'get': True}) - return nfs_service['state'] - - -def set_nfs_service_state(do_what=None, expect_to_pass=True, fail_check=None): - ''' - Start or stop the NFS service - expect_to_pass parameter is optional - fail_check parameter is optional - ''' - assert do_what in ['start', 'stop'], f"Requested invalid service state: {do_what}" - test_res = {'start': True, 'stop': False} - - try: - call(f'service.{do_what}', 'nfs', {'silent': False}) - except Exception as e: - if expect_to_pass: - assert False, f'Unexpected failure {do_what}ing nfs: {e!r}' - if fail_check is not None: - assert fail_check in str(e) - else: - if expect_to_pass: - res = call('service.started', 'nfs') - assert res == test_res[do_what], f'Expected {test_res[do_what]} for NFS started result, but found {res}' - - -def confirm_nfsd_processes(expected): - ''' - Confirm the expected number of nfsd processes are running - ''' - result = SSH_TEST("cat /proc/fs/nfsd/threads", user, password) - assert int(result['stdout']) == expected, result - - -def confirm_mountd_processes(expected): - ''' - Confirm the expected number of mountd processes are running - ''' - rx_mountd = r"rpc\.mountd" - result = SSH_TEST(f"ps -ef | grep '{rx_mountd}' | wc -l", user, password) - - # If there is more than one, we subtract one to account for the rpc.mountd thread manager - num_detected = int(result['stdout']) - assert (num_detected - 1 if num_detected > 1 else num_detected) == expected - - -def confirm_rpc_processes(expected=['idmapd', 'bind', 'statd']): - ''' - Confirm the expected rpc processes are running - NB: This only supports the listed names - ''' - prepend = {'idmapd': 'rpc.', 'bind': 'rpc', 'statd': 'rpc.'} - for n in expected: - procname = prepend[n] + n - result = SSH_TEST(f"pgrep {procname}", user, password) - assert len(result['output'].splitlines()) > 0 - - -def confirm_nfs_version(expected=[]): - ''' - Confirm the expected NFS versions are 'enabled and supported' - Possible values for expected: - ["3"] means NFSv3 only - ["4"] means NFSv4 only - ["3","4"] means both NFSv3 and NFSv4 - ''' - results = SSH_TEST("rpcinfo -s | grep ' nfs '", user, password) - for v in expected: - assert v in results['stdout'].strip().split()[1], results - - -def confirm_rpc_port(rpc_name, port_num): - ''' - Confirm the expected port for the requested rpc process - rpc_name = ('mountd', 'status', 'nlockmgr') - ''' - line = ssh(f"rpcinfo -p | grep {rpc_name} | grep tcp") - # example: '100005 3 tcp 618 mountd' - assert int(line.split()[3]) == port_num, str(line) - - -class NFS_CONFIG: - '''This is used to restore the NFS config to its original state''' - default_nfs_config = {} - - -def save_nfs_config(): - ''' - Save the NFS configuration DB at the start of this test module.
- This is used to restore the settings _before_ NFS is disabled near - the end of the testing. There might be a way to do this with a fixture, - but it also might require refactoring of the tests. - This is called at the start of test_01_creating_the_nfs_server. - ''' - exclude = ('id', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd') - for k, v in filter(lambda x: x[0] not in exclude, call('nfs.config').items()): - NFS_CONFIG.default_nfs_config[k] = v - - -@contextlib.contextmanager -def nfs_dataset(name, options=None, acl=None, mode=None): - assert "/" not in name - - dataset = f"{pool_name}/{name}" - - result = POST("/pool/dataset/", {"name": dataset, **(options or {})}) - assert result.status_code == 200, result.text - - if acl is None: - result = POST("/filesystem/setperm/", {'path': f"/mnt/{dataset}", "mode": mode or "777"}) - else: - result = POST("/filesystem/setacl/", {'path': f"/mnt/{dataset}", "dacl": acl}) - - assert result.status_code == 200, result.text - job_status = wait_on_job(result.json(), 180) - assert job_status["state"] == "SUCCESS", str(job_status["results"]) - - try: - yield dataset - finally: - # dataset may be busy - sleep(10) - result = DELETE(f"/pool/dataset/id/{urllib.parse.quote(dataset, '')}/") - retry = 6 - # Under some circumstances, the dataset can balk at being deleted - # leaving the dataset mounted which then buggers up subsequent tests - while result.status_code != 200 and retry > 0: - sleep(10) - result = DELETE(f"/pool/dataset/id/{urllib.parse.quote(dataset, '')}/") - retry -= 1 - assert result.status_code == 200, result.text - - -@contextlib.contextmanager -def nfs_config(options=None): - ''' - Use this to restore settings when changed within a test function. - Example usage: - with nfs_config(): - <code that modifies the NFS configuration> - ''' - try: - nfs_db_conf = call("nfs.config") - excl = ['id', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd'] - [nfs_db_conf.pop(key) for key in excl] - yield copy(nfs_db_conf) - finally: - call("nfs.update", nfs_db_conf) - - -# Enable NFS server -@pytest.mark.dependency(name='NFS_INIT') -def test_01_init_the_nfs_config(): - # initialize default_nfs_config for later restore - save_nfs_config() - - # Confirm NFS is not running - nfs_state = get_nfs_service_state() - assert nfs_state == 'STOPPED', f'Before update, expected NFS to be STOPPED, but found {nfs_state}' - - payload = { - "mountd_port": 618, - "allow_nonroot": False, - "rpcstatd_port": 871, - "rpclockd_port": 32803, - "protocols": ["NFSV3", "NFSV4"] - } - nfs_conf = call("nfs.update", payload) - assert nfs_conf['mountd_port'] == 618 - assert nfs_conf['rpcstatd_port'] == 871 - assert nfs_conf['rpclockd_port'] == 32803 - - # Confirm NFS remains not running - nfs_state = get_nfs_service_state() - assert nfs_state == 'STOPPED', f'After update, expected NFS to be STOPPED, but found {nfs_state}' - - -@pytest.mark.dependency(name='NFS_DATASET_CREATED') -def test_02_creating_dataset_nfs(): - payload = {"name": dataset} - results = POST("/pool/dataset/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(depends=['NFS_DATASET_CREATED']) -def test_03_changing_dataset_permissions_of_nfs_dataset(): - payload = { - "acl": [], - "mode": "777", - "user": "root", - "group": 'root' - } - results = POST(f"/pool/dataset/id/{dataset_url}/permission/", payload) - assert results.status_code == 200, results.text - global job_id - job_id = results.json() - - -def test_04_verify_the_job_id_is_successful(): - job_status =
wait_on_job(job_id, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -@pytest.mark.dependency(name='NFSID_SHARE_CREATED', depends=['NFS_DATASET_CREATED']) -def test_05_creating_a_nfs_share_on_nfs_PATH(): - global nfsid - payload = {"comment": "My Test Share", - "path": NFS_PATH, - "security": ["SYS"]} - results = POST("/sharing/nfs/", payload) - assert results.status_code == 200, results.text - nfsid = results.json()['id'] - - -def test_06_starting_nfs_service_at_boot(): - results = PUT("/service/id/nfs/", {"enable": True}) - assert results.status_code == 200, results.text - - -def test_07_checking_to_see_if_nfs_service_is_enabled_at_boot(): - results = GET("/service?service=nfs") - assert results.json()[0]["enable"] is True, results.text - - -@pytest.mark.dependency(name='NFS_SERVICE_STARTED') -def test_08_starting_nfs_service(): - set_nfs_service_state('start') - - -def test_09_checking_to_see_if_nfs_service_is_running(): - results = GET("/service?service=nfs") - assert results.json()[0]["state"] == "RUNNING", results.text - - -@pytest.mark.dependency(depends=['NFS_SERVICE_STARTED']) -def test_10_confirm_state_directory(): - """ - By default, the NFS state directory is at /var/lib/nfs. - To support HA systems, we moved this to the system dataset - at /var/db/system/nfs. In support of this we updated the - NFS conf file settings. - """ - - # Make sure the conf file has the expected settings - nfs_state_dir = '/var/db/system/nfs' - s = parse_server_config() - assert s['exportd']['state-directory-path'] == nfs_state_dir, str(s) - assert s['nfsdcld']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcld'), str(s) - assert s['nfsdcltrack']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcltrack'), str(s) - assert s['mountd']['state-directory-path'] == nfs_state_dir, str(s) - assert s['statd']['state-directory-path'] == nfs_state_dir, str(s) - # Confirm we have the mount point in the system dataset - # ---------------------------------------------------------------------- - # NOTE: Update test_001_ssh.py: test_002_first_boot_checks. - # NOTE: Test fresh-install and upgrade.
- # ---------------------------------------------------------------------- - - -@pytest.mark.parametrize('vers', [3, 4]) -def test_11_perform_basic_nfs_ops(vers): - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=vers, user=user, password=password, ip=truenas_server.ip) as n: - n.create('testfile') - n.mkdir('testdir') - contents = n.ls('.') - assert 'testdir' in contents - assert 'testfile' in contents - - n.unlink('testfile') - n.rmdir('testdir') - contents = n.ls('.') - assert 'testdir' not in contents - assert 'testfile' not in contents - - -def test_12_perform_server_side_copy(): - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip) as n: - n.server_side_copy('ssc1', 'ssc2') - - -@pytest.mark.dependency(depends=['NFS_SERVICE_STARTED']) -@pytest.mark.parametrize('nfsd,cores,expected', [ - # User specifies number of nfsd, expect: 50 nfsd, 12 mountd - (50, 1, {'nfsd': 50, 'mountd': 12, 'managed': False}), - # Dynamic, expect 12 nfsd and 3 mountd - (None, 12, {'nfsd': 12, 'mountd': 3, 'managed': True}), - # Dynamic, expect 4 nfsd and 1 mountd - (None, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}), - # Dynamic, expect 2 nfsd and 1 mountd - (None, 2, {'nfsd': 2, 'mountd': 1, 'managed': True}), - # Dynamic, expect 1 nfsd and 1 mountd - (None, 1, {'nfsd': 1, 'mountd': 1, 'managed': True}), - # Should be trapped by validator: Illegal input - (0, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}), - # Should be trapped by validator: Illegal input - (257, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}), - # Dynamic, max nfsd via calculation is 32 - (None, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}), - # -1 is a flag to set bindip and confirm 'managed' stays True, - (-1, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}), -]) -def test_19_updating_the_nfs_service(nfsd, cores, expected): - """ - This test verifies that the service can be updated in general, - and also that the 'servers' key can be altered. - The latter goal is achieved by reading the nfs config file - and verifying that the value there was set correctly. - - Update: - The default setting for 'servers' is None. This specifies that we dynamically - determine the number of nfsd to start based on the capabilities of the system. - In this state, we choose one nfsd for each CPU core. - The user can override the dynamic calculation by specifying a - number greater than zero. - - The number of mountd will be 1/4 the number of nfsd.
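- - Worked examples, mirroring the parametrize table above: with 12 cores, dynamic - mode yields 12 nfsd and 12 // 4 = 3 mountd; with 48 cores the nfsd count is - capped at 32, giving 32 // 4 = 8 mountd.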
- """ - - with mock("system.cpu_info", return_value={"core_count": cores}): - - # Use 0 as 'null' flag - if nfsd is None or nfsd in range(1, 257): - call("nfs.update", {"servers": nfsd}) - - s = parse_server_config() - assert int(s['nfsd']['threads']) == expected['nfsd'], str(s) - assert int(s['mountd']['threads']) == expected['mountd'], str(s) - - confirm_nfsd_processes(expected['nfsd']) - confirm_mountd_processes(expected['mountd']) - confirm_rpc_processes() - - # In all passing cases, the 'servers' field represents the number of expected nfsd - nfs_conf = call("nfs.config") - assert nfs_conf['servers'] == expected['nfsd'] - assert nfs_conf['managed_nfsd'] == expected['managed'] - else: - if nfsd == -1: - # We know apriori that the current state is managed_nfsd == True - with nfs_config(): - # Test making change to non-'server' setting does not change managed_nfsd - call("nfs.update", {"bindip": [truenas_server.ip]}) - assert call("nfs.config")['managed_nfsd'] == expected['managed'] - else: - with pytest.raises(ValidationErrors) as ve: - assert call("nfs.config")['managed_nfsd'] == expected['managed'] - call("nfs.update", {"servers": nfsd}) - - assert ve.value.errors == [ValidationError('nfs_update.servers', 'Should be between 1 and 256', 22)] - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_20_update_nfs_share(): - nfsid = GET('/sharing/nfs?comment=My Test Share').json()[0]['id'] - payload = {"security": []} - results = PUT(f"/sharing/nfs/id/{nfsid}/", payload) - assert results.status_code == 200, results.text - - -def test_21_checking_to_see_if_nfs_service_is_enabled(): - results = GET("/service?service=nfs") - assert results.json()[0]["state"] == "RUNNING", results.text - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED', 'NFS_SERVICE_STARTED']) -@pytest.mark.parametrize( - "networklist,ExpectedToPass", [ - # IPv4 - (["192.168.0.0/24", "192.168.1.0/24"], True), - (["192.168.0.0/16", "192.168.1.0/24"], False), - (["192.168.0.0/24", "192.168.0.211/32"], False), - (["192.168.0.0/64"], False), - (["bogus_network"], False), - (["192.168.27.211"], True), # auto-converted to CIDR: 192.168.27.211/32 - # IPv6 - (["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/96"], True), - (["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/88"], False), - (["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128"], False), - (["2001:0db8:85a3:0000:0000:8a2e:0370:7334/256"], False), - (["2001:0db8:85a3:0000:0000:8a2e:0370:7334"], True), # auto-converted to CIDR with /128 - ], - ids=[ - "IPv4 - non-overlap", - "IPv4 - overlap wide", - "IPv4 - overlap narrow", - "IPv4 - invalid range", - "IPv4 - invalid format", - "IPv4 - auto-convert to CIDR", - "IPv6 - non-overlap", - "IPv6 - overlap wide", - "IPv6 - overlap narrow", - "IPv6 - invalid range", - "IPv6 - auto-convert to CIDR", - ] -) -def test_31_check_nfs_share_network(networklist, ExpectedToPass): - """ - Verify that adding a network generates an appropriate line in exports - file for same path. 
Sample: - - "/mnt/dozer/nfs"\ - 192.168.0.0/24(sec=sys,rw,subtree_check)\ - 192.168.1.0/24(sec=sys,rw,subtree_check) - """ - - results = PUT(f"/sharing/nfs/id/{nfsid}/", {'networks': networklist}) - if ExpectedToPass: - assert results.status_code == 200, results.text - else: - assert results.status_code != 200, results.text - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - - exports_networks = [x['host'] for x in parsed[0]['opts']] - if ExpectedToPass: - # The input is converted to CIDR format which often will - # look different from the input. e.g. 1.2.3.4/16 -> 1.2.0.0/16 - cidr_list = [str(ipaddress.ip_network(x, strict=False)) for x in networklist] - # The entry should be present - diff = set(cidr_list) ^ set(exports_networks) - assert len(diff) == 0, f'diff: {diff}, exports: {parsed}' - else: - # The entry should not be present - assert len(exports_networks) == 1, str(parsed) - - # Reset to default - results = PUT(f"/sharing/nfs/id/{nfsid}/", {'networks': []}) - assert results.status_code == 200, results.text - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - exports_networks = [x['host'] for x in parsed[0]['opts']] - assert len(exports_networks) == 1, str(parsed) - assert exports_networks[0] == '*', str(parsed) - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED', 'NFS_SERVICE_STARTED']) -@pytest.mark.parametrize( - "hostlist,ExpectedToPass", [ - (["192.168.0.69", "192.168.0.70", "@fakenetgroup"], True), - (["asdfnm-*", "?-asdfnm-*", "asdfnm[0-9]", "nmix?-*dev[0-9]"], True), - (["asdfdm-*.example.com", "?-asdfdm-*.ixsystems.com", - "asdfdm[0-9].example.com", "dmix?-*dev[0-9].ixsystems.com"], True), - (["-asdffail", "*.asdffail.com", "*.*.com", "bozofail.?.*"], False), - (["bogus/name"], False), - (["192.168.1.0/24"], False), - (["asdfdm[0-9].example.com", "-asdffail", - "devteam-*.ixsystems.com", "*.asdffail.com"], False), - (["192.168.1.0", "192.168.1.0"], False), - (["ixsystems.com", "ixsystems.com"], False), - (["ixsystems.com", "*"], True), - (["*", "*.ixsystems.com"], True), - (["192.168.1.o"], False), - (["bad host"], False), - (["2001:0db8:85a3:0000:0000:8a2e:0370:7334"], True) - ], - ids=[ - "Valid - IPv4 address, netgroup", - "Valid - wildcard names,ranges", - "Valid - wildcard domains,ranges", - "Invalid - names,domains (not resolvable)", - "Invalid - name (path)", - "Invalid - name (network format)", - "Mix - valid and invalid names", - "Invalid - duplicate address", - "Invalid - duplicate domain", - "Valid - mix name and everybody", - "Valid - mix everybody and wildcard name", - "Invalid - character in address", - "Invalid - name with spaces", - "Valid - IPv6 address" - ] -) -def test_32_check_nfs_share_hosts(hostlist, ExpectedToPass): - """ - Verify that adding hosts generates appropriate lines in the exports - file for the same path. Sample: - - "/mnt/dozer/nfs"\ - 192.168.0.69(sec=sys,rw,subtree_check)\ - 192.168.0.70(sec=sys,rw,subtree_check)\ - @fakenetgroup(sec=sys,rw,subtree_check) - - Host name handling in middleware: - If the host name contains no wildcard or special chars, - then we test it with a lookup; - otherwise we apply the host name rules and skip the lookup - - The rules for the host field are: - - Dashes are allowed, but a level cannot start or end with a dash, '-' - - Only the left-most level may contain special characters: '*','?'
and '[]' - """ - results = PUT(f"/sharing/nfs/id/{nfsid}/", {'hosts': hostlist}) - if ExpectedToPass: - assert results.status_code == 200, results.text - else: - assert results.status_code != 200, results.text - - # Check the exports file - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - exports_hosts = [x['host'] for x in parsed[0]['opts']] - if ExpectedToPass: - # The entry should be present - diff = set(hostlist) ^ set(exports_hosts) - assert len(diff) == 0, f'diff: {diff}, exports: {parsed}' - else: - # The entry should not be present - assert len(exports_hosts) == 1, str(parsed) - - # Reset to default should always pass - cleanup_results = PUT(f"/sharing/nfs/id/{nfsid}/", {'hosts': []}) - assert cleanup_results.status_code == 200, cleanup_results.text - # Check the exports file to confirm it's clear - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - exports_hosts = [x['host'] for x in parsed[0]['opts']] - assert len(exports_hosts) == 1, str(parsed) - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_33_check_nfs_share_ro(): - """ - Verify that toggling `ro` will cause an appropriate change in the - exports file. We also verify with write tests on a local mount. - """ - - # Make sure we end up in the original state with 'rw' - try: - # Confirm 'rw' initial state and create a file and dir - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert "rw" in parsed[0]['opts'][0]['parameters'], str(parsed) - - # Create the file and dir - with SSH_NFS(truenas_server.ip, NFS_PATH, user=user, password=password, ip=truenas_server.ip) as n: - n.create("testfile_should_pass") - n.mkdir("testdir_should_pass") - - # Change to 'ro' - results = PUT(f"/sharing/nfs/id/{nfsid}/", {'ro': True}) - assert results.status_code == 200, results.text - - # Confirm 'ro' state and behavior - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert "rw" not in parsed[0]['opts'][0]['parameters'], str(parsed) - - # Attempt create and delete - with SSH_NFS(truenas_server.ip, NFS_PATH, user=user, password=password, ip=truenas_server.ip) as n: - with pytest.raises(RuntimeError) as re: - n.create("testfile_should_fail") - assert False, "Should not have been able to create a new file" - assert 'cannot touch' in str(re), re - - with pytest.raises(RuntimeError) as re: - n.mkdir("testdir_should_fail") - assert False, "Should not have been able to create a new directory" - assert 'cannot create directory' in str(re), re - - finally: - results = PUT(f"/sharing/nfs/id/{nfsid}/", {'ro': False}) - assert results.status_code == 200, results.text - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert "rw" in parsed[0]['opts'][0]['parameters'], str(parsed) - - # Cleanup the file and dir - with SSH_NFS(truenas_server.ip, NFS_PATH, user=user, password=password, ip=truenas_server.ip) as n: - n.unlink("testfile_should_pass") - n.rmdir("testdir_should_pass") - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_34_check_nfs_share_maproot(): - """ - root squash is always enabled, and so maproot is accomplished through - anonuid and anongid - - Sample: - "/mnt/dozer/NFSV4"\ - *(sec=sys,rw,anonuid=65534,anongid=65534,subtree_check) - """ - - call('sharing.nfs.update', nfsid, { - 'maproot_user': 'nobody', - 'maproot_group': 'nogroup' - }) - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - - params = parsed[0]['opts'][0]['parameters'] - assert 'anonuid=65534' in params, str(parsed) - assert 'anongid=65534' in params,
str(parsed) - - """ - setting maproot_user and maproot_group to root should - cause us to append "no_root_squash" to options. - """ - call('sharing.nfs.update', nfsid, { - 'maproot_user': 'root', - 'maproot_group': 'root' - }) - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - params = parsed[0]['opts'][0]['parameters'] - assert 'no_root_squash' in params, str(parsed) - assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed) - - """ - Second share should have normal (no maproot) params. - """ - second_share = f'/mnt/{pool_name}/second_share' - with nfs_dataset('second_share'): - with nfs_share(second_share): - parsed = parse_exports() - assert len(parsed) == 2, str(parsed) - - params = parsed[0]['opts'][0]['parameters'] - assert 'no_root_squash' in params, str(parsed) - - params = parsed[1]['opts'][0]['parameters'] - assert 'no_root_squash' not in params, str(parsed) - assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed) - - call('sharing.nfs.update', nfsid, { - 'maproot_user': '', - 'maproot_group': '' - }) - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - params = parsed[0]['opts'][0]['parameters'] - - assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed) - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_35_check_nfs_share_mapall(): - """ - mapall is accomplished through anonuid and anongid and - setting 'all_squash'. - - Sample: - "/mnt/dozer/NFSV4"\ - *(sec=sys,rw,all_squash,anonuid=65534,anongid=65534,subtree_check) - """ - - call('sharing.nfs.update', nfsid, { - 'mapall_user': 'nobody', - 'mapall_group': 'nogroup' - }) - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - - params = parsed[0]['opts'][0]['parameters'] - assert 'anonuid=65534' in params, str(parsed) - assert 'anongid=65534' in params, str(parsed) - assert 'all_squash' in params, str(parsed) - - call('sharing.nfs.update', nfsid, { - 'mapall_user': '', - 'mapall_group': '' - }) - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - params = parsed[0]['opts'][0]['parameters'] - - assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed) - assert 'all_squash' not in params, str(parsed) - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_36_check_nfsdir_subtree_behavior(): - """ - If dataset mountpoint is exported rather than simple dir, - we disable subtree checking as an optimization. 
This check - makes sure we're doing this as expected: - - Sample: - "/mnt/dozer/NFSV4"\ - *(sec=sys,rw,no_subtree_check) - "/mnt/dozer/NFSV4/foobar"\ - *(sec=sys,rw,subtree_check) - """ - - with directory(f'{NFS_PATH}/sub1') as tmp_path: - with nfs_share(tmp_path, {'hosts': ['127.0.0.1']}): - parsed = parse_exports() - assert len(parsed) == 2, str(parsed) - - assert parsed[0]['path'] == NFS_PATH, str(parsed) - assert 'no_subtree_check' in parsed[0]['opts'][0]['parameters'], str(parsed) - - assert parsed[1]['path'] == tmp_path, str(parsed) - assert 'subtree_check' in parsed[1]['opts'][0]['parameters'], str(parsed) - - -class Test37WithFixture: - """ - Wrap a class around test_37 to allow calling the fixture only once - in the parametrized test - """ - - # TODO: Work up a valid IPv6 test - # res = SSH_TEST(f"ip address show {interface} | grep inet6", user, password, ip) - # ipv6_network = str(res['output'].split()[1]) - # ipv6_host = ipv6_network.split('/')[0] - - @pytest.fixture(scope='class') - def dataset_and_dirs(self): - """ - Create a dataset and an NFS share for it for host 127.0.0.1 only - In the dataset, create directories: dir1, dir2, dir3 - In each directory, create subdirs: subdir1, subdir2, subdir3 - """ - - # Characteristics of expected error messages - err_strs = [ - ["Another share", "same path"], - ["This or another", "overlaps"], - ["Another NFS share already exports"], - ["Symbolic links"] - ] - - vol0 = f'/mnt/{pool_name}/VOL0' - with nfs_dataset('VOL0'): - # Top level shared to narrow host - with nfs_share(vol0, {'hosts': ['127.0.0.1']}): - # Get the initial list of entries for the cleanup test - contents = GET('/sharing/nfs').json() - startIdList = [item.get('id') for item in contents] - - # Create the dirs - dirs = ["everybody_1", "everybody_2", "limited_1", "dir_1", "dir_2"] - subdirs = ["subdir1", "subdir2", "subdir3"] - try: - for dir in dirs: - results = SSH_TEST(f"mkdir -p {vol0}/{dir}", user, password) - assert results['result'] is True - for subdir in subdirs: - results = SSH_TEST(f"mkdir -p {vol0}/{dir}/{subdir}", user, password) - assert results['result'] is True - # And symlinks - results = SSH_TEST( - f"ln -sf {vol0}/{dir}/{subdir} {vol0}/{dir}/symlink2{subdir}", - user, password - ) - assert results['result'] is True - - yield vol0, err_strs - finally: - # Remove the created dirs - for dir in dirs: - SSH_TEST(f"rm -rf {vol0}/{dir}", user, password) - assert results['result'] is True - - # Remove the created shares - contents = GET('/sharing/nfs').json() - endIdList = [item.get('id') for item in contents] - for id in endIdList: - if id not in startIdList: - result = DELETE(f"/sharing/nfs/id/{id}/") - assert result.status_code == 200, result.text - - # Parameters for test_37 - # Directory (dataset share VOL0), isHost, HostOrNet, ExpectedToPass, ErrFormat - @pytest.mark.parametrize( - "dirname,isHost,HostOrNet,ExpectedToPass, ErrFormat", [ - ("everybody_1", True, ["*"], True, None), - ("everybody_2", True, ["*"], True, None), - ("everybody_2", False, ["192.168.1.0/22"], True, None), - ("limited_1", True, ["127.0.0.1"], True, None), - ("limited_1", False, ["192.168.1.0/22"], True, None), - ("limited_1", True, ["127.0.0.1"], False, 0), - ("limited_1", False, ["192.168.1.0/22"], False, 2), - ("dir_1", True, ["*.example.com"], True, None), - ("dir_1", True, ["*.example.com"], False, 0), - ("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e::/96"], True, None), - ("dir_1/subdir2", True, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334"], True, None), - 
("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334/112"], False, 1), - ("dir_1/subdir3", True, ["192.168.27.211"], True, None), - ("dir_1/subdir3", False, ["192.168.24.0/22"], True, None), - ("limited_1/subdir2", True, ["*"], True, None), - ("limited_1/subdir2", True, ["*"], False, 2), - ("dir_2/subdir2", False, ["192.168.1.0/24"], True, None), - ("dir_2/subdir2", False, ["192.168.1.0/32"], False, 1), - ("limited_1/subdir3", True, ["192.168.1.0", "*.ixsystems.com"], True, None), - ("dir_1/symlink2subdir3", True, ["192.168.0.0"], False, 3), - ], - ids=[ - "NAS-120957: host - everybody", - "NAS-120957: host - non-related paths", - "NAS-129577: network with everybody on same path", - "NAS-123042: host - export subdirs", - "NAS-123042: network - export subdirs", - "NAS-127220: host - already exported", - "NAS-127220: network - already exported", - "NAS-120616: host - wildcards", - "NAS-127220: host - wildcard already exported", - "NAS-123042: network - IPv6 network range", - "NAS-129577: host - IPv6 allow host overlap with network", - "NAS-123042: network - IPv6 overlap with network", - "NAS-123042: host - export sub-subdirs", - "NAS-129522: network - allow overlap with host", - "NAS-123042: host - everybody on sub-subdir", - "NAS-127220: host - everybody already exported sub-subdir", - "NAS-123042: network - export sub-subdirs", - "NAS-123042: network - overlaping networks sub-subdir", - "NAS-123042: host - two hosts, same sub-subdir", - "Block exporting symlinks" - ] - ) - def test_37_check_nfsdir_subtree_share(self, dataset_and_dirs, dirname, isHost, HostOrNet, ExpectedToPass, ErrFormat): - """ - Sharing subtrees to the same host can cause problems for - NFSv3. This check makes sure a share creation follows - the rules. - * First match is applied - * A new path that is _the same_ as existing path cannot be shared to same 'host' - - For example, the following is not allowed: - "/mnt/dozer/NFS"\ - fred(rw) - "/mnt/dozer/NFS"\ - fred(ro) - - Also not allowed are collisions that may result in unexpected share permissions. - For example, the following is not allowed: - "/mnt/dozer/NFS"\ - *(rw) - "/mnt/dozer/NFS"\ - marketing(ro) - """ - - vol, err_strs = dataset_and_dirs - dirpath = f'{vol}/{dirname}' - if isHost: - payload = {"path": dirpath, "hosts": HostOrNet} - else: - payload = {"path": dirpath, "networks": HostOrNet} - - if ExpectedToPass: - call("sharing.nfs.create", payload) - else: - with pytest.raises(ValidationErrors) as ve: - call("sharing.nfs.create", payload) - errStr = str(ve.value.errors[0]) - # Confirm we have the expected error message format - for this_substr in err_strs[ErrFormat]: - assert this_substr in errStr - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_38_check_nfs_allow_nonroot_behavior(): - """ - If global configuration option "allow_nonroot" is set, then - we append "insecure" to each exports line. - Since this is a global option, it triggers an nfsd restart - even though it's not technically required. - Linux will, by default, mount using a priviledged port (1..1023) - MacOS NFS mounts do not follow this 'standard' behavior. - - Four conditions to test: - server: secure (e.g. allow_nonroot is False) - client: resvport -> expect to pass. - client: noresvport -> expect to fail. - server: insecure (e.g. allow_nonroot is True) - client: resvport -> expect to pass. 
- client: noresvport -> expect to pass - - Sample: - "/mnt/dozer/NFSV4"\ - *(sec=sys,rw,insecure,no_subtree_check) - """ - - def get_client_nfs_port(): - ''' - Output from netstat -nt looks like: - tcp 0 0 127.0.0.1:50664 127.0.0.1:6000 ESTABLISHED - The client port is the number after the ':' in the 5th column - ''' - rv = (None, None) - res = ssh("netstat -nt") - for line in str(res).splitlines(): - # The server will listen on port 2049 - if f"{truenas_server.ip}:2049" == line.split()[3]: - rv = (line, line.split()[4].split(':')[1]) - return rv - - # Verify that NFS server configuration is as expected - with nfs_config() as nfs_conf_orig: - - # --- Test: allow_nonroot is False - assert nfs_conf_orig['allow_nonroot'] is False, nfs_conf_orig - - # Confirm setting in /etc/exports - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed) - - # Confirm we allow mounts from 'root' ports - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip): - client_port = get_client_nfs_port() - assert client_port[1] is not None, f"Failed to get client port: f{client_port[0]}" - assert int(client_port[1]) < 1024, \ - f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}" - - # Confirm we block mounts from 'non-root' ports - with pytest.raises(RuntimeError) as re: - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'], - user=user, password=password, ip=truenas_server.ip): - pass - # We should not get to this assert - assert False, "Unexpected success with mount" - assert 'Operation not permitted' in str(re), re - - # --- Test: allow_nonroot is True - new_nfs_conf = call('nfs.update', {"allow_nonroot": True}) - assert new_nfs_conf['allow_nonroot'] is True, new_nfs_conf - - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert 'insecure' in parsed[0]['opts'][0]['parameters'], str(parsed) - - # Confirm we allow mounts from 'root' ports - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip): - client_port = get_client_nfs_port() - assert client_port[1] is not None, "Failed to get client port" - assert int(client_port[1]) < 1024, \ - f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}" - - # Confirm we allow mounts from 'non-root' ports - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'], - user=user, password=password, ip=truenas_server.ip): - client_port = get_client_nfs_port() - assert client_port[1] is not None, "Failed to get client port" - assert int(client_port[1]) >= 1024, \ - f"client_port is not in 'non-root' range: {client_port[1]}\n{client_port[0]}" - - # Confirm setting was returned to original state - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed) - - -def test_39_check_nfs_service_protocols_parameter(): - """ - This test verifies that changing the `protocols` option generates expected - changes in nfs kernel server config. In most cases we will also confirm - the settings have taken effect. - - For the time being this test will also exercise the deprecated `v4` option - to the same effect, but this will later be removed. - - NFS must be enabled for this test to succeed as while the config (i.e. - database) will be updated regardless, the server config file will not - be updated. 
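- - For reference, a sketch of the mapping exercised below: - protocols=["NFSV3", "NFSV4"]  ->  nfsd: vers3=y, vers4=y - protocols=["NFSV3"]           ->  nfsd: vers3=y, vers4=n - protocols=["NFSV4"]           ->  nfsd: vers3=n, vers4=y - protocols=[]                  ->  rejected by validation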
- """ - results = GET("/service?service=nfs") - assert results.json()[0]["state"] == "RUNNING", results - - # Multiple restarts cause systemd failures. Reset the systemd counters. - reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd") - - # Check existing config (both NFSv3 & NFSv4 configured) - results = GET("/nfs") - assert results.status_code == 200, results.text - protocols = results.json()['protocols'] - assert "NFSV3" in protocols, results.text - assert "NFSV4" in protocols, results.text - - s = parse_server_config() - assert s['nfsd']["vers3"] == 'y', str(s) - assert s['nfsd']["vers4"] == 'y', str(s) - confirm_nfs_version(['3', '4']) - - # Turn off NFSv4 (v3 on) - results = PUT("/nfs/", {"protocols": ["NFSV3"]}) - assert results.status_code == 200, results.text - - results = GET("/nfs") - assert results.status_code == 200, results.text - protocols = results.json()['protocols'] - assert "NFSV3" in protocols, results.text - assert "NFSV4" not in protocols, results.text - - s = parse_server_config() - assert s['nfsd']["vers3"] == 'y', str(s) - assert s['nfsd']["vers4"] == 'n', str(s) - - # Confirm setting has taken effect: v4->off, v3->on - confirm_nfs_version(['3']) - - # Try (and fail) to turn off both - results = PUT("/nfs/", {"protocols": []}) - assert results.status_code != 200, results.text - - # Turn off NFSv3 (v4 on) - results = PUT("/nfs/", {"protocols": ["NFSV4"]}) - assert results.status_code == 200, results.text - - results = GET("/nfs") - assert results.status_code == 200, results.text - protocols = results.json()['protocols'] - assert "NFSV3" not in protocols, results.text - assert "NFSV4" in protocols, results.text - - s = parse_server_config() - assert s['nfsd']["vers3"] == 'n', str(s) - assert s['nfsd']["vers4"] == 'y', str(s) - - # Confirm setting has taken effect: v4->on, v3->off - confirm_nfs_version(['4']) - - # Finally turn both back on again - results = PUT("/nfs/", {"protocols": ["NFSV3", "NFSV4"]}) - assert results.status_code == 200, results.text - - results = GET("/nfs") - assert results.status_code == 200, results.text - protocols = results.json()['protocols'] - assert "NFSV3" in protocols, results.text - assert "NFSV4" in protocols, results.text - - s = parse_server_config() - assert s['nfsd']["vers3"] == 'y', str(s) - assert s['nfsd']["vers4"] == 'y', str(s) - - # Confirm setting has taken effect: v4->on, v3->on - confirm_nfs_version(['3', '4']) - - -@pytest.mark.dependency(depends=['NFS_SERVICE_STARTED']) -def test_40_check_nfs_service_udp_parameter(): - """ - This test verifies the udp config is NOT in the DB and - that it is NOT in the etc file. - """ - - # The 'udp' setting should have been removed - nfs_conf = call('nfs.config') - assert nfs_conf.get('udp') is None, nfs_conf - - s = parse_server_config() - assert s.get('nfsd', {}).get('udp') is None, s - - -@pytest.mark.dependency(name='NFS_INIT') -def test_41_check_nfs_service_ports(): - """ - This test verifies that the custom ports we specified in - earlier NFS tests are set in the relevant files and are active. 
- """ - with nfs_config() as config_db: - # Compare DB with setting in /etc/nfs.conf.d/local.conf - s = parse_server_config() - assert int(s['mountd']['port']) == config_db["mountd_port"], str(s) - assert int(s['statd']['port']) == config_db["rpcstatd_port"], str(s) - assert int(s['lockd']['port']) == config_db["rpclockd_port"], str(s) - - # Confirm port settings are active - confirm_rpc_port('mountd', config_db["mountd_port"]) - confirm_rpc_port('status', config_db["rpcstatd_port"]) - confirm_rpc_port('nlockmgr', config_db["rpclockd_port"]) - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_42_check_nfs_client_status(): - """ - This test checks the function of API endpoints to list NFSv3 and - NFSv4 clients by performing loopback mounts on the remote TrueNAS - server and then checking client counts. Due to inherent imprecision - of counts over NFSv3 protcol (specifically with regard to decrementing - sessions) we only verify that count is non-zero for NFSv3. - """ - - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=3, user=user, password=password, ip=truenas_server.ip): - results = GET('/nfs/get_nfs3_clients/', payload={ - 'query-filters': [], - 'query-options': {'count': True} - }) - assert results.status_code == 200, results.text - assert results.json() != 0, results.text - - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip): - results = GET('/nfs/get_nfs4_clients/', payload={ - 'query-filters': [], - 'query-options': {'count': True} - }) - assert results.status_code == 200, results.text - assert results.json() == 1, results.text - - -@pytest.mark.timeout(600) -@pytest.mark.dependency(depends=['NFS_SERVICE_STARTED']) -def test_43_check_nfsv4_acl_support(): - """ - This test validates reading and setting NFSv4 ACLs through an NFSv4 - mount in the following manner for NFSv4.2, NFSv4.1 & NFSv4.0: - 1) Create and locally mount an NFSv4 share on the TrueNAS server - 2) Iterate through all possible permissions options and set them - via an NFS client, read back through NFS client, and read resulting - ACL through the filesystem API. - 3) Repeat same process for each of the supported ACE flags. - 4) For NFSv4.1 or NFSv4.2, repeat same process for each of the - supported acl_flags. 
- """ - acl_nfs_path = f'/mnt/{pool_name}/test_nfs4_acl' - test_perms = { - "READ_DATA": True, - "WRITE_DATA": True, - "EXECUTE": True, - "APPEND_DATA": True, - "DELETE_CHILD": True, - "DELETE": True, - "READ_ATTRIBUTES": True, - "WRITE_ATTRIBUTES": True, - "READ_NAMED_ATTRS": True, - "WRITE_NAMED_ATTRS": True, - "READ_ACL": True, - "WRITE_ACL": True, - "WRITE_OWNER": True, - "SYNCHRONIZE": True - } - test_flags = { - "FILE_INHERIT": True, - "DIRECTORY_INHERIT": True, - "INHERIT_ONLY": False, - "NO_PROPAGATE_INHERIT": False, - "INHERITED": False - } - for (version, test_acl_flag) in [(4, True), (4.1, True), (4.0, False)]: - theacl = [ - {"tag": "owner@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"}, - {"tag": "group@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"}, - {"tag": "everyone@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"}, - {"tag": "USER", "id": 65534, "perms": test_perms, "flags": test_flags, "type": "ALLOW"}, - {"tag": "GROUP", "id": 666, "perms": test_perms.copy(), "flags": test_flags.copy(), "type": "ALLOW"}, - ] - with nfs_dataset("test_nfs4_acl", {"acltype": "NFSV4", "aclmode": "PASSTHROUGH"}, theacl): - with nfs_share(acl_nfs_path): - with SSH_NFS(truenas_server.ip, acl_nfs_path, vers=version, user=user, password=password, ip=truenas_server.ip) as n: - nfsacl = n.getacl(".") - for idx, ace in enumerate(nfsacl): - assert ace == theacl[idx], str(ace) - - for perm in test_perms.keys(): - if perm == 'SYNCHRONIZE': - # break in SYNCHRONIZE because Linux tool limitation - break - - theacl[4]['perms'][perm] = False - n.setacl(".", theacl) - nfsacl = n.getacl(".") - for idx, ace in enumerate(nfsacl): - assert ace == theacl[idx], str(ace) - - payload = { - 'path': acl_nfs_path, - 'simplified': False - } - result = POST('/filesystem/getacl/', payload) - assert result.status_code == 200, result.text - - for idx, ace in enumerate(result.json()['acl']): - assert ace == nfsacl[idx], str(ace) - - for flag in ("INHERIT_ONLY", "NO_PROPAGATE_INHERIT"): - theacl[4]['flags'][flag] = True - n.setacl(".", theacl) - nfsacl = n.getacl(".") - for idx, ace in enumerate(nfsacl): - assert ace == theacl[idx], str(ace) - - payload = { - 'path': acl_nfs_path, - 'simplified': False - } - result = POST('/filesystem/getacl/', payload) - assert result.status_code == 200, result.text - - for idx, ace in enumerate(result.json()['acl']): - assert ace == nfsacl[idx], str(ace) - if test_acl_flag: - assert 'none' == n.getaclflag(".") - for acl_flag in ['auto-inherit', 'protected', 'defaulted']: - n.setaclflag(".", acl_flag) - assert acl_flag == n.getaclflag(".") - payload = { - 'path': acl_nfs_path, - 'simplified': False - } - result = POST('/filesystem/getacl/', payload) - assert result.status_code == 200, result.text - # Normalize the flag_is_set name for comparision to plugin equivalent - # (just remove the '-' from auto-inherit) - if acl_flag == 'auto-inherit': - flag_is_set = 'autoinherit' - else: - flag_is_set = acl_flag - # Now ensure that only the expected flag is set - nfs41_flags = result.json()['nfs41_flags'] - for flag in ['autoinherit', 'protected', 'defaulted']: - if flag == flag_is_set: - assert nfs41_flags[flag], nfs41_flags - else: - assert not nfs41_flags[flag], nfs41_flags - - -def test_44_check_nfs_xattr_support(): - """ - Perform basic validation of NFSv4.2 xattr support. - Mount path via NFS 4.2, create a file and dir, - and write + read xattr on each. 
- """ - xattr_nfs_path = f'/mnt/{pool_name}/test_nfs4_xattr' - with nfs_dataset("test_nfs4_xattr"): - with nfs_share(xattr_nfs_path): - with SSH_NFS(truenas_server.ip, xattr_nfs_path, vers=4.2, user=user, password=password, ip=truenas_server.ip) as n: - n.create("testfile") - n.setxattr("testfile", "user.testxattr", "the_contents") - xattr_val = n.getxattr("testfile", "user.testxattr") - assert xattr_val == "the_contents" - - n.create("testdir", True) - n.setxattr("testdir", "user.testxattr2", "the_contents2") - xattr_val = n.getxattr("testdir", "user.testxattr2") - assert xattr_val == "the_contents2" - - -def test_45_check_setting_runtime_debug(): - """ - This validates that the private NFS debugging API works correctly. - """ - disabled = { - "NFS": ["NONE"], - "NFSD": ["NONE"], - "NLM": ["NONE"], - "RPC": ["NONE"] - } - enabled = { - "NFS": ["PROC", "XDR", "CLIENT", "MOUNT", "XATTR_CACHE"], - "NFSD": ["ALL"], - "NLM": ["CLIENT", "CLNTLOCK", "SVC"], - "RPC": ["CALL", "NFS", "TRANS"] - } - failure = {"RPC": ["CALL", "NFS", "TRANS", "NONE"]} - - try: - assert call('nfs.get_debug') == disabled - assert call('nfs.set_debug', enabled) - debug_values = call('nfs.get_debug') - assert all((set(enabled[i]) == set(debug_values[i]) for i in debug_values)), debug_values - - with pytest.raises(Exception) as ve: - # This should generate an ValueError exception on the system - call('nfs.set_debug', failure) - assert "Cannot specify another value with NONE" in str(ve.value), ve - finally: - assert call('nfs.set_debug', disabled) - debug_values = call('nfs.get_debug') - assert all((set(disabled[i]) == set(debug_values[i]) for i in debug_values)), debug_values - - -def test_46_set_bind_ip(): - ''' - This test requires a static IP address - * Test the private nfs.bindip call - * Test the actual bindip config setting - - Confirm setting in conf files - - Confirm service on IP address - ''' - choices = call("nfs.bindip_choices") - assert truenas_server.ip in choices - - call("nfs.bindip", {"bindip": [truenas_server.ip]}) - call("nfs.bindip", {"bindip": []}) - - # Test config with bindip. Use choices from above - # TODO: check with 'nmap -sT ' from the runner. - with nfs_config() as db_conf: - - # Should have no bindip setting - nfs_conf = parse_server_config() - rpc_conf = parse_rpcbind_config() - assert db_conf['bindip'] == [] - assert nfs_conf['nfsd'].get('host') is None - assert rpc_conf.get('-h') is None - - # Set bindip - call("nfs.update", {"bindip": [truenas_server.ip]}) - - # Confirm we see it in the nfs and rpc conf files - nfs_conf = parse_server_config() - rpc_conf = parse_rpcbind_config() - assert truenas_server.ip in nfs_conf['nfsd'].get('host'), f"nfs_conf = {nfs_conf}" - assert truenas_server.ip in rpc_conf.get('-h'), f"rpc_conf = {rpc_conf}" - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -def test_48_syslog_filters(): - """ - This test checks the function of the mountd_log setting to filter - rpc.mountd messages that have priority DEBUG to NOTICE. - We performing loopback mounts on the remote TrueNAS server and - then check the syslog for rpc.mountd messages. Outside of SSH_NFS - we test the umount case. - """ - with nfs_config(): - - # The effect is much more clear if there are many mountd. 
- # We can force this by configuring many nfsd - call("nfs.update", {"servers": 24}) - - # Confirm default setting: mountd logging enabled - call("nfs.update", {"mountd_log": True}) - - # Add dummy entries to avoid false positives - for i in range(10): - ssh(f'logger "====== {i}: NFS test_48_syslog_filters (with) ======"') - - # Sometimes the mount messages in syslog can take over a minute to appear. - # Sometimes the messages are output nearly immediately. - # We have syslog already configured to output nearly immediately. - # This retry loop is to prevent false failures on the slow response condition - # and not time penalize the quick response condition - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip): - # Increase num_tries if necessary - num_tries = tries_remaining = 12 - found = False - res = "" - while not found and tries_remaining > 0: - res = ssh("tail -30 /var/log/syslog") - if "rpc.mountd" in res: - found = True - break - tries_remaining -= 1 - sleep(num_tries - tries_remaining) - - assert found, f"Expected to find 'rpc.mountd' in the output but found:\n{res}" - - # NOTE: Additional mountd messages will get logged on unmount at the exit of the 'with' - - # Disable mountd logging - call("nfs.update", {"mountd_log": False}) - - # Add dummy entries to avoid false positives - for i in range(10): - ssh(f'logger "====== {i}: NFS test_48_syslog_filters (without) ======"') - with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip): - # wait a few seconds to make sure syslog has a chance to flush log messages - sleep(4) - res = ssh("tail -10 /var/log/syslog") - assert "rpc.mountd" not in res, f"Did not expect to find 'rpc.mountd' in the output but found:\n{res}" - - # Get a second chance to catch mountd messages on the umount. They should not be present. 
- sleep(4) - res = ssh("tail -10 /var/log/syslog") - assert "rpc.mountd" not in res, f"Did not expect to find 'rpc.mountd' in the output but found:\n{res}" - - -@pytest.mark.dependency(depends=['NFSID_SHARE_CREATED']) -@pytest.mark.parametrize('type,data', [ - ('InvalidAssignment', [ - {'maproot_user': 'baduser'}, 'maproot_user', 'User not found: baduser' - ]), - ('InvalidAssignment', [ - {'maproot_group': 'badgroup'}, 'maproot_user', 'This field is required when map group is specified' - ]), - ('InvalidAssignment', [ - {'mapall_user': 'baduser'}, 'mapall_user', 'User not found: baduser' - ]), - ('InvalidAssignment', [ - {'mapall_group': 'badgroup'}, 'mapall_user', 'This field is required when map group is specified' - ]), - ('MissingUser', [ - 'maproot_user', 'missinguser' - ]), - ('MissingUser', [ - 'mapall_user', 'missinguser' - ]), - ('MissingGroup', [ - 'maproot_group', 'missingroup' - ]), - ('MissingGroup', [ - 'mapall_group', 'missingroup' - ]), -]) -def test_50_nfs_invalid_user_group_mapping(type, data): - ''' - Verify we properly trap and handle invalid user and group mapping - Two conditions: - 1) Catch invalid assignments - 2) Catch invalid settings at NFS start - ''' - - ''' Local helper routine ''' - def run_missing_usrgrp_test(usrgrp, tmp_path, share, usrgrpInst): - parsed = parse_exports() - assert len(parsed) == 2, str(parsed) - this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}'] - assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}" - - # Remove the user/group and restart nfs - call(f'{usrgrp}.delete', usrgrpInst['id']) - call('service.restart', 'nfs') - - # An alert should be generated - alerts = call('alert.list') - this_alert = [entry for entry in alerts if entry['klass'] == "NFSexportMappingInvalidNames"] - assert len(this_alert) == 1, f"Did not find alert for 'NFSexportMappingInvalidNames'.\n{alerts}" - - # The NFS export should have been removed - parsed = parse_exports() - assert len(parsed) == 1, str(parsed) - this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}'] - assert len(this_share) == 0, f"Unexpectedly found share {tmp_path}.\nexports = {parsed}" - - # Modify share to map with a built-in user or group and restart NFS - call('sharing.nfs.update', share, {data[0]: "ftp"}) - call('service.restart', 'nfs') - - # The alert should be cleared - alerts = call('alert.list') - this_alert = [entry for entry in alerts if entry['key'] == "NFSexportMappingInvalidNames"] - assert len(this_alert) == 0, f"Unexpectedly found alert 'NFSexportMappingInvalidNames'.\n{alerts}" - - # Share should have been restored - parsed = parse_exports() - assert len(parsed) == 2, str(parsed) - this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}'] - assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}" - - ''' Test Processing ''' - with directory(f'{NFS_PATH}/sub1') as tmp_path: - - if type == 'InvalidAssignment': - payload = {'path': tmp_path} | data[0] - with pytest.raises(ValidationErrors) as ve: - call("sharing.nfs.create", payload) - assert ve.value.errors == [ValidationError('sharingnfs_create.' 
                                                   + f'{data[1]}', data[2], 22)]
-
-        elif type == 'MissingUser':
-            usrname = data[1]
-            testkey, testval = data[0].split('_')
-
-            usr_payload = {'username': usrname, 'full_name': usrname,
-                           'group_create': True, 'password': 'abadpassword'}
-            mapping = {data[0]: usrname}
-            with create_user(usr_payload) as usrInst:
-                with nfs_share(tmp_path, mapping) as share:
-                    run_missing_usrgrp_test(testval, tmp_path, share, usrInst)
-
-        elif type == 'MissingGroup':
-            # Use a built-in user for the group test
-            grpname = data[1]
-            testkey, testval = data[0].split('_')
-
-            mapping = {f"{testkey}_user": 'ftp', data[0]: grpname}
-            with create_group({'name': grpname}) as grpInst:
-                with nfs_share(tmp_path, mapping) as share:
-                    run_missing_usrgrp_test(testval, tmp_path, share, grpInst)
-
-
-@pytest.mark.parametrize('state,expected', [
-    (None, 'n'),  # Test default state
-    (True, 'y'),
-    (False, 'n')
-])
-def test_52_manage_gids(state, expected):
-    '''
-    The nfsd_manage_gids setting is called "Support > 16 groups" in the webui.
-    It is that and, to a greater extent, it defines where the GIDs used for
-    permission checks come from.
-
-    If NOT enabled, then the expectation is that the groups to which the user
-    belongs are defined on the _client_ and NOT the server. It also means the
-    groups to which the user belongs are passed in on the NFS commands from the
-    client. The file object GID is checked against the passed-in list of GIDs.
-    This is also where the 16 group limitation is enforced: the NFS protocol
-    allows passing up to 16 groups per user.
-
-    If nfsd_manage_gids is enabled, the groups to which the user belongs are
-    defined on the server. In this condition, the server confirms the user is
-    a member of the file object GID.
-
-    NAS-126067: Debian changed the default setting for manage_gids in /etc/nfs.conf
-    from undefined to "manage_gids = y".
-
-    TEST: Confirm manage_gids is set in /etc/nfs.conf.d/local.conf for
-    both the enable and disable states
-
-    TODO: Add client-side and server-side tests from the client when available
-    '''
-    with nfs_config():
-
-        if state is not None:
-            sleep(3)  # In Cobia: Prevent restarting NFS too quickly.
-            call("nfs.update", {"userd_manage_gids": state})
-            # Allow config file to be updated
-            sleep(1)
-
-        s = parse_server_config()
-        assert s['mountd']['manage-gids'] == expected, str(s)
-
-
-@pytest.mark.dependency(depends=['NFS_SERVICE_STARTED'])
-def test_54_v4_domain():
-    '''
-    The v4_domain configuration item maps to the 'Domain' setting in
-    the [General] section of /etc/idmapd.conf.
-    It is described as:
-        The local NFSv4 domain name. An NFSv4 domain is a namespace
-        with a unique username<->UID and groupname<->GID mapping.
- (Default: Host's fully-qualified DNS domain name) - ''' - - with nfs_config() as nfs_db: - # By default, v4_domain is not set - assert nfs_db['v4_domain'] == "", f"Expected zero-len string, but found {nfs_db['v4_domain']}" - s = parse_server_config("idmapd") - assert s['General'].get('Domain') is None, f"'Domain' was not expected to be set: {s}" - - # Make a setting change and confirm - db = call('nfs.update', {"v4_domain": "ixsystems.com"}) - assert db['v4_domain'] == 'ixsystems.com', f"v4_domain failed to be updated in nfs DB: {db}" - # Allow config file to be updated - sleep(1) - s = parse_server_config("idmapd") - assert s['General'].get('Domain') == 'ixsystems.com', f"'Domain' failed to be updated in idmapd.conf: {s}" - - -def test_70_stopping_nfs_service(): - # Restore original settings before we stop - call('nfs.update', NFS_CONFIG.default_nfs_config) - payload = {"service": "nfs"} - results = POST("/service/stop/", payload) - assert results.status_code == 200, results.text - sleep(1) - - -def test_71_checking_to_see_if_nfs_service_is_stop(): - results = GET("/service?service=nfs") - assert results.json()[0]["state"] == "STOPPED", results.text - - -def test_72_check_adjusting_threadpool_mode(): - """ - Verify that NFS thread pool configuration can be adjusted - through private API endpoints. - - This will fail if NFS server (or NFS client) is still running. - """ - for m in ('AUTO', 'PERCPU', 'PERNODE', 'GLOBAL'): - call('nfs.set_threadpool_mode', m) - assert call('nfs.get_threadpool_mode') == m - - -def test_74_disable_nfs_service_at_boot(): - results = PUT("/service/id/nfs/", {"enable": False}) - assert results.status_code == 200, results.text - - -def test_75_checking_nfs_disable_at_boot(): - results = GET("/service?service=nfs") - assert results.json()[0]['enable'] is False, results.text - - -def test_76_destroying_smb_dataset(): - results = DELETE(f"/pool/dataset/id/{dataset_url}/") - assert results.status_code == 200, results.text - - -@pytest.mark.parametrize('exports', ['missing', 'empty']) -def test_80_start_nfs_service_with_missing_or_empty_exports(exports): - ''' - NAS-123498: Eliminate conditions on exports for service start - The goal is to make the NFS server behavior similar to the other protocols - ''' - if exports == 'empty': - results = SSH_TEST("echo '' > /etc/exports", user, password) - else: # 'missing' - results = SSH_TEST("rm -f /etc/exports", user, password) - assert results['result'] is True - - with nfs_config() as nfs_conf: - # Start NFS - call('service.start', 'nfs', {'silent': False}) - sleep(1) - confirm_nfsd_processes(nfs_conf['servers']) - - # Return NFS to stopped condition - payload = {"service": "nfs"} - results = POST("/service/stop/", payload) - assert results.status_code == 200, results.text - sleep(1) - - # Confirm stopped - results = GET("/service?service=nfs") - assert results.json()[0]["state"] == "STOPPED", results.text - - -@pytest.mark.parametrize('expect_NFS_start', [False, True]) -def test_82_files_in_exportsd(expect_NFS_start): - ''' - Any files in /etc/exports.d are potentially dangerous, especially zfs.exports. - We implemented protections against rogue exports files. 
- - We block starting NFS if there are any files in /etc/exports.d - - We generate an alert when we detect this condition - - We clear the alert when /etc/exports.d is empty - ''' - fail_check = {False: 'ConditionDirectoryNotEmpty=!/etc/exports.d', True: None} - - # Simple helper function for this test - def set_immutable_state(want_immutable=True): - call('filesystem.set_immutable', want_immutable, '/etc/exports.d') - assert call('filesystem.is_immutable', '/etc/exports.d') is want_immutable - - try: - # Setup the test - set_immutable_state(want_immutable=False) # Disable immutable - - # Do the 'failing' case first to end with a clean condition - if not expect_NFS_start: - results = SSH_TEST("echo 'bogus data' > /etc/exports.d/persistent.file", user, password) - assert results['result'] is True - results = SSH_TEST("chattr +i /etc/exports.d/persistent.file", user, password) - assert results['result'] is True - else: - # Restore /etc/exports.d directory to a clean state - results = SSH_TEST("chattr -i /etc/exports.d/persistent.file", user, password) - assert results['result'] is True - results = SSH_TEST("rm -rf /etc/exports.d/*", user, password) - assert results['result'] is True - - set_immutable_state(want_immutable=True) # Enable immutable - - set_nfs_service_state('start', expect_NFS_start, fail_check[expect_NFS_start]) - - finally: - # In all cases we want to end with NFS stopped - set_nfs_service_state('stop') - - # If NFS start is blocked, then an alert should have been raised - alerts = call('alert.list') - if not expect_NFS_start: - # Find alert - assert any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts - else: # Alert should have been cleared - assert not any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts diff --git a/tests/api2/test_310_service_announcement.py b/tests/api2/test_310_service_announcement.py deleted file mode 100644 index 97a45599459c9..0000000000000 --- a/tests/api2/test_310_service_announcement.py +++ /dev/null @@ -1,479 +0,0 @@ -import contextlib -import random -import re -import socket -import string -from datetime import datetime, timedelta -from time import sleep -from typing import cast - -import pytest -from assets.websocket.server import reboot -from assets.websocket.service import (ensure_service_disabled, - ensure_service_enabled, - ensure_service_started, - ensure_service_stopped) -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server -from pytest_dependency import depends -from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf - -from auto_config import ha, password, pool_name, user -from functions import SSH_TEST -from protocols import smb_share - -digits = ''.join(random.choices(string.digits, k=4)) -dataset_name = f"smb-cifs{digits}" -SMB_NAME1 = f"TestCifsSMB{digits}" -SMB_PATH1 = f"/mnt/{pool_name}/{dataset_name}" - -dataset_name2 = f"other{digits}" -SMB_NAME2 = f"OtherTestSMB{digits}" -SMB_PATH2 = f"/mnt/{pool_name}/{dataset_name2}" - -# Service names -TIME_MACHINE = '_adisk._tcp.local.' # Automatic Disk -DEVICE_INFO = '_device-info._tcp.local.' # Device Info -HTTP = '_http._tcp.local.' -SMB = '_smb._tcp.local.' 
-NUT = '_nut._tcp' - -DO_MDNS_REBOOT_TEST = False -USE_AVAHI_BROWSE = True -skip_avahi_browse_tests = pytest.mark.skipif(USE_AVAHI_BROWSE, reason="Skip tests broken by use of avahi-browse") - - -def _get_tm_props(rec, key): - result = {} - for pair in rec['properties'][key].decode('utf-8').split(','): - k, v = pair.split('=') - result[k] = v - return result - - -def allow_settle(delay=3): - # Delay slightly to allow things to propagate - sleep(delay) - - -@contextlib.contextmanager -def service_announcement_config(config): - if not config: - yield - else: - old_config = call('network.configuration.config')['service_announcement'] - call('network.configuration.update', {'service_announcement': config}) - try: - yield - finally: - call('network.configuration.update', {'service_announcement': old_config}) - - -@contextlib.contextmanager -def ensure_aapl_extensions(): - # First check - enabled = call('smb.config')['aapl_extensions'] - if enabled: - yield - else: - call('smb.update', {'aapl_extensions': True}) - try: - yield - finally: - call('smb.update', {'aapl_extensions': False}) - - -def wait_for_avahi_startup(interval=5, timeout=300): - """When tests are running in a QE environment it can take a long - time for the service to start up completely, because many systems - can be configured with the same hostname. - - This function will detect the most recent avahi-daemon startup and - wait for it to complete""" - command = 'journalctl --no-pager -u avahi-daemon --since "10 minute ago"' - brackets = re.compile(r'[\[\]]+') - while timeout > 0: - startup = None - ssh_out = SSH_TEST(command, user, password) - assert ssh_out['result'], str(ssh_out) - output = ssh_out['output'] - # First we just look for the most recent startup command - for line in output.split('\n'): - if line.endswith('starting up.'): - startup = line - if startup: - pid = brackets.split(startup)[1] - completion = f'avahi-daemon[{pid}]: Server startup complete.' 
- for line in output.split('\n'): - if completion in line: - # Did we just complete - finish_plus_five = (datetime.strptime(line.split()[2], "%H:%M:%S") + timedelta(seconds=5)).time() - if finish_plus_five > datetime.now().time(): - # Wait 5 seconds to ensure services are published - sleep(5) - return True - sleep(interval) - timeout -= interval - return False - - -class ZeroconfCollector: - - def on_service_state_change(self, zeroconf, service_type, name, state_change): - - if state_change is ServiceStateChange.Added: - info = zeroconf.get_service_info(service_type, name) - if info: - item = {} - item['addresses'] = [addr for addr in info.parsed_scoped_addresses()] - if self.ip not in item['addresses']: - return - item['port'] = cast(int, info.port) - item['server'] = info.server - if info.properties: - item['properties'] = {} - for key, value in info.properties.items(): - if key: - item['properties'][key] = value - else: - item['properties'] = {} - self.result[service_type][name] = item - self.update_internal_hostname(item['server']) - - def find_items(self, service_announcement=None, timeout=5): - self.result = {} - for service in self.SERVICES: - self.result[service] = {} - with service_announcement_config(service_announcement): - assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup" - zeroconf = Zeroconf() - ServiceBrowser(zeroconf, self.SERVICES, handlers=[self.on_service_state_change]) - try: - sleep(timeout) - finally: - zeroconf.close() - return self.result - - def clear_cache(self): - # No-op for zeroconf collector - pass - - -class AvahiBrowserCollector: - - name_to_service = { - 'Device Info': DEVICE_INFO, - 'Web Site': HTTP, - 'Microsoft Windows Network': SMB, - 'Apple TimeMachine': TIME_MACHINE, - '_nut._tcp': NUT, - } - - def find_items(self, service_announcement=None, timeout=5): - self.result = {} - for service in self.SERVICES: - self.result[service] = {} - with service_announcement_config(service_announcement): - assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup" - # ssh_out = SSH_TEST("avahi-browse -v --all -t -p --resolve", user, password) - # Appears sometimes we need a little more time - ssh_out = SSH_TEST("timeout --preserve-status 5 avahi-browse -v --all -p --resolve", user, password) - assert ssh_out['result'], str(ssh_out) - output = ssh_out['output'] - for line in output.split('\n'): - item = {} - items = line.split(';') - if len(items) > 1 and items[0] == '=': - if len(items) == 10: - server = items[3] - pub_ip = items[7] - if pub_ip not in self.ips: - continue - item['addresses'] = [pub_ip] - item['port'] = items[8] - item['server'] = items[6] - service_type = AvahiBrowserCollector.name_to_service[items[4]] - key = f"{server}.{service_type}" - item['properties'] = self.process_properties(items[9], service_type) - self.result[service_type][key] = item - self.update_internal_hostname(item['server']) - return self.result - - def process_properties(self, txts, service_type): - props = {} - for txt in txts.split(): - if txt.startswith('"') and txt.endswith('"'): - txt = txt[1:-1] - for prop in ['model', 'dk0', 'dk1', 'sys']: - if txt.startswith(f"{prop}="): - props[prop.encode('utf-8')] = txt[len(prop) + 1:].encode('utf-8') - return props - - def clear_cache(self): - # We need to restart the avahi-daemon to clear cache - # print("Clearing cache") - ssh("systemctl restart avahi-daemon") - - @staticmethod - def get_ipv6(ip): - """Given an IPv4 address string, find the IPv6 on the same - interface (if present). 
Returns either the IPv6 address as
-        a string, or None"""
-        ips = call('network.general.summary')['ips']
-        for interface in ips:
-            matched = False
-            if 'IPV4' in ips[interface]:
-                for ipv4 in ips[interface]['IPV4']:
-                    if ipv4.split('/')[0] == ip:
-                        matched = True
-                        break
-            if matched and 'IPV6' in ips[interface]:
-                for ipv6 in ips[interface]['IPV6']:
-                    return ipv6.split('/')[0]
-        return None
-
-
-class abstractmDNSAnnounceCollector:
-    """
-    Class to help in the discovery (and processing/checking)
-    of services advertised by a particular IP address/server name.
-    """
-    SERVICES = [TIME_MACHINE, DEVICE_INFO, HTTP, SMB, NUT]
-
-    def __init__(self, ip, tn_hostname):
-        self.ip = socket.gethostbyname(ip)
-        self.hostname = self.tn_hostname = tn_hostname
-
-    def update_internal_hostname(self, published_hostname):
-        """If there has been a conflict then it is possible that a derivative
-        of the original hostname is being used. Check whether the published
-        name could be a conflict-resolved name and, if so, update the
-        hostname that will be used during checks.
-        """
-        if published_hostname == self.tn_hostname:
-            return
-        possible_new_hostname = published_hostname.split('.')[0]
-        if possible_new_hostname == self.hostname:
-            return
-        # Check whether either 'hostname-...' or ' #...'
-        if possible_new_hostname.split()[0].split('-')[0] == self.tn_hostname:
-            self.hostname = possible_new_hostname
-
-    def has_service_type(self, hostname, service_type):
-        if not hostname:
-            hostname = self.hostname
-        key = f"{hostname}.{service_type}"
-        return key in self.result[service_type]
-
-    def get_service_type(self, hostname, service_type):
-        if not hostname:
-            hostname = self.hostname
-        key = f"{hostname}.{service_type}"
-        if key in self.result[service_type]:
-            return self.result[service_type][key]
-
-    def has_time_machine(self, hostname=None):
-        return self.has_service_type(hostname, TIME_MACHINE)
-
-    def has_device_info(self, hostname=None):
-        return self.has_service_type(hostname, DEVICE_INFO)
-
-    def has_http(self, hostname=None):
-        return self.has_service_type(hostname, HTTP)
-
-    def has_smb(self, hostname=None):
-        return self.has_service_type(hostname, SMB)
-
-    def time_machine(self, hostname=None):
-        return self.get_service_type(hostname, TIME_MACHINE)
-
-    def check_present(self, device_info=True, http=True, smb=True, time_machine=True, hostname=None):
-        assert self.has_device_info(hostname) == device_info, self.result[DEVICE_INFO]
-        assert self.has_http(hostname) == http, self.result[HTTP]
-        assert self.has_smb(hostname) == smb, self.result[SMB]
-        assert self.has_time_machine(hostname) == time_machine, self.result[TIME_MACHINE]
-
-
-if USE_AVAHI_BROWSE:
-    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, AvahiBrowserCollector):
-        def __init__(self, ip, tn_hostname):
-            abstractmDNSAnnounceCollector.__init__(self, ip, tn_hostname)
-            # avahi-browse can report either an IPv4 address or the
-            # corresponding IPv6 address if configured on the same interface.
-            # So we will expand our inclusion check to encompass both.
-            ipv6 = AvahiBrowserCollector.get_ipv6(self.ip)
-            if ipv6:
-                self.ips = [self.ip, ipv6]
-            else:
-                self.ips = [self.ip]
-else:
-    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, ZeroconfCollector):
-        pass
-
-
-@pytest.fixture(autouse=True, scope="module")
-def setup_environment():
-    try:
-        with ensure_service_disabled('cifs'):
-            with ensure_service_stopped('cifs'):
-                yield
-    finally:
-        pass
-
-
-@pytest.mark.timeout(600)
-@pytest.mark.dependency(name="servann_001")
-def test_001_initial_config(request):
-    """Ensure that the service announcement configuration is as expected."""
-    global current_hostname
-
-    network_config = call('network.configuration.config')
-    sa = network_config['service_announcement']
-    if ha:
-        current_hostname = network_config['hostname_virtual']
-    else:
-        current_hostname = network_config['hostname']
-    # At the moment we only care about mdns
-    assert sa['mdns'] is True, sa
-
-    # Let's restart avahi (in case we've updated middleware)
-    call('service.restart', 'mdns')
-    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-    ac.find_items()
-    ac.check_present(smb=False, time_machine=False)
-
-
-# This test is broken by the use of avahi-browse: when it is
-# called it re-activates the avahi-daemon by means of the
-# avahi-daemon.socket.
-# The DEV and HTTP service files have NOT been deleted upon
-# a service stop, so this reactivation causes the test to
-# fail.
-# Since the test passes when run with the zeroconf library on
-# a suitably connected test-runner, there is no real need to chase this.
-@pytest.mark.timeout(600)
-@skip_avahi_browse_tests
-def test_002_mdns_disabled(request):
-    depends(request, ["servann_001"], scope="session")
-    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-    ac.clear_cache()
-    ac.find_items({'mdns': False, 'wsd': True, 'netbios': False})
-    ac.check_present(False, False, False, False)
-
-
-# Setting a VERY long timeout, as when this test is run in isolation
-# on jenkins there can be many (20+) hostname clashes, which means
-# avahi can take a LONG time to settle down/start up.
-#
-# We could avoid this by setting a unique hostname (as is done during a
-# full test run), but it also seems a worthwhile exercise to be able
-# to test in such an unsuitable environment.
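Editor's sketch (hypothetical, not part of the original suite): the unique-hostname workaround mentioned above could look roughly like this, reusing the random, string, and call imports already present in this module; the 'hostname' payload key is an assumption based on the config read in test_001.

    # pick a short random suffix and apply it before the mDNS tests run
    unique = ''.join(random.choices(string.digits, k=4))
    call('network.configuration.update', {'hostname': f'truenas-{unique}'})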
-@pytest.mark.timeout(900) -def test_003_mdns_smb_share(request): - """Perform some mDNS tests wrt SMB and ADISK services.""" - depends(request, ["servann_001"], scope="session") - - # SMB is not started originally - ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname) - ac.find_items() - ac.check_present(smb=False, time_machine=False) - - with dataset(dataset_name): - with smb_share(SMB_PATH1, {'name': SMB_NAME1, 'comment': 'Test SMB Share'}): - # SMB is still not started - ac.find_items() - ac.check_present(smb=False, time_machine=False) - with ensure_service_started('cifs'): - allow_settle() - ac.find_items() - ac.check_present(time_machine=False) - # OK, the SMB is stopped again, Ensure we don't advertise SMB anymore - ac.clear_cache() - ac.find_items() - ac.check_present(smb=False, time_machine=False) - - # Now we're going to setup a time machine share - with ensure_aapl_extensions(): - with ensure_service_started('cifs'): - allow_settle() - # Check mDNS before we have a time machine share - ac.find_items() - ac.check_present(time_machine=False) - with smb_share(SMB_PATH1, {'name': SMB_NAME1, - 'comment': 'Basic TM SMB Share', - 'purpose': 'TIMEMACHINE'}) as shareID1: - allow_settle() - # Check mDNS now we have a time machine share - ac.find_items() - ac.check_present() - - # Now read the share details and then check against what mDNS reported - share1 = call('sharing.smb.query', [['id', '=', shareID1]])[0] - - tm = ac.time_machine() - props = _get_tm_props(tm, b'dk0') - assert props['adVN'] == SMB_NAME1, props - assert props['adVF'] == '0x82', props - assert props['adVU'] == share1['vuid'], props - # Now make another time machine share - with dataset(dataset_name2): - with smb_share(SMB_PATH2, {'name': SMB_NAME2, - 'comment': 'Multiuser TM SMB Share', - 'purpose': 'ENHANCED_TIMEMACHINE'}) as shareID2: - share2 = call('sharing.smb.query', [['id', '=', shareID2]])[0] - allow_settle() - ac.find_items() - ac.check_present() - tm = ac.time_machine() - props0 = _get_tm_props(tm, b'dk0') - props1 = _get_tm_props(tm, b'dk1') - assert props0['adVF'] == '0x82', props0 - assert props1['adVF'] == '0x82', props1 - # Let's not make any assumption about which share is which - if props0['adVN'] == SMB_NAME1: - # SHARE 1 in props0 - assert props0['adVU'] == share1['vuid'], props0 - # SHARE 2 in props1 - assert props1['adVN'] == SMB_NAME2, props1 - assert props1['adVU'] == share2['vuid'], props1 - else: - # SHARE 1 in props1 - assert props1['adVN'] == SMB_NAME1, props1 - assert props1['adVU'] == share1['vuid'], props1 - # SHARE 2 in props0 - assert props0['adVN'] == SMB_NAME2, props0 - assert props0['adVU'] == share2['vuid'], props0 - # Still have one TM share - allow_settle() - ac.find_items() - ac.check_present() - - # Check mDNS now we no longer have a time machine share - ac.clear_cache() - ac.find_items() - ac.check_present(time_machine=False) - # Finally check when SMB is stopped again - ac.clear_cache() - ac.find_items() - ac.check_present(smb=False, time_machine=False) - - -if DO_MDNS_REBOOT_TEST: - def test_004_reboot_with_mdns_smb_share(request): - """Create a time-machine SMB and check that it is published - following a reboot.""" - depends(request, ["servann_001"], scope="session") - - # First let's setup a time machine share - with dataset(dataset_name): - with smb_share(SMB_PATH1, {'name': SMB_NAME1, - 'comment': 'Basic TM SMB Share', - 'purpose': 'TIMEMACHINE'}): - with ensure_service_enabled('cifs'): - # Next reboot and then check the expected services - # are advertised. 
-                    reboot(truenas_server.ip, 'cifs')
-                    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-                    ac.find_items()
-                    ac.check_present()
diff --git a/tests/api2/test_330_pool_acltype.py b/tests/api2/test_330_pool_acltype.py
deleted file mode 100644
index 577f880a1b8b9..0000000000000
--- a/tests/api2/test_330_pool_acltype.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python3
-
-import pytest
-import sys
-import os
-from pytest_dependency import depends
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from auto_config import pool_name
-from functions import POST, GET, PUT, DELETE, SSH_TEST
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import dataset as make_dataset

-test1_dataset = f'{pool_name}/test1'
-dataset_url = test1_dataset.replace("/", "%2F")
-
-
-@pytest.fixture(scope='module')
-def create_test_dataset():
-    with make_dataset('test1') as ds:
-        yield ds
-
-
-def test_01_verify_default_acltype_from_pool_dataset_with_api(request):
-    results = GET(f'/pool/dataset/id/{pool_name}/')
-    assert results.status_code == 200, results.text
-    assert results.json()['acltype']['rawvalue'] == 'posix', results.text
-
-
-def test_04_verify_test1_dataset_inherited_parent_acltype_with_api(create_test_dataset, request):
-    results = GET(f'/pool/dataset/id/{dataset_url}/')
-    assert results.status_code == 200, results.text
-    assert results.json()['acltype']['rawvalue'] == 'posix', results.text
-
-
-def test_06_change_acltype_to_nfsv4(create_test_dataset, request):
-    call('pool.dataset.update', test1_dataset, {
-        'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'
-    })
-
-    res = call('zfs.dataset.query', [['id', '=', test1_dataset]],
-               {'get': True, 'extra': {'retrieve_children': False}}
-               )
-    props = res['properties']
-
-    assert props['acltype']['value'] == 'nfsv4', str(props)
-    assert props['aclmode']['value'] == 'passthrough', str(props)
-    assert props['aclinherit']['value'] == 'passthrough', str(props)
-
-
-def test_07_reset_acltype_to_posix(create_test_dataset, request):
-    call('pool.dataset.update', test1_dataset, {
-        'acltype': 'POSIX', 'aclmode': 'DISCARD'
-    })
-
-    res = call('zfs.dataset.query', [['id', '=', test1_dataset]],
-               {'get': True, 'extra': {'retrieve_children': False}}
-               )
-    props = res['properties']
-
-    assert props['acltype']['value'] == 'posix', str(props)
-    assert props['aclmode']['value'] == 'discard', str(props)
-    assert props['aclinherit']['value'] == 'discard', str(props)
diff --git a/tests/api2/test_341_pool_dataset_encryption.py b/tests/api2/test_341_pool_dataset_encryption.py
deleted file mode 100644
index 78e231ef3d665..0000000000000
--- a/tests/api2/test_341_pool_dataset_encryption.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-#!/usr/bin/env python3
-
-# License: BSD
-
-import secrets
-
-import pytest
-from middlewared.test.integration.utils import call
-from pytest_dependency import depends
-
-from auto_config import password, user
-from functions import DELETE, GET, POST, PUT, SSH_TEST, wait_on_job
-
-# generated token_hex(32) values (32 random bytes) used as encryption keys
-pool_token_hex = secrets.token_hex(32)
-pool_token_hex2 = secrets.token_hex(32)
-dataset_token_hex = secrets.token_hex(32)
-dataset_token_hex2 = secrets.token_hex(32)
-encrypted_pool_name = 'test_encrypted'
-dataset = f'{encrypted_pool_name}/encrypted'
-dataset_url = dataset.replace('/', '%2F')
-child_dataset = f'{dataset}/child'
-child_dataset_url = child_dataset.replace('/', '%2F')
-
-
-@pytest.mark.dependency(name="CREATED_POOL")
-def test_create_a_normal_pool(request):
-    global pool_id,
pool_disks - # Get one disk for encryption testing - pool_disks = [call("disk.get_unused")[0]["name"]] - payload = { - 'name': encrypted_pool_name, - 'encryption': False, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_create_a_passphrase_encrypted_root_on_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_verify_pool_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_add_the_comment_on_the_passphrase_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'comments': 'testing encrypted dataset' - } - results = PUT(f'/pool/dataset/id/{dataset_url}/', payload) - assert results.status_code == 200, results.text - - -def test_change_a_passphrase_encrypted_root_key_encryption(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'key': dataset_token_hex, - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_that_the_dataset_encrypted_root_changed_to_key_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_passphrase_encrypted_root(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_not_encrypted_dataset_on_a_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption': False, - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] is None, results.text - - -def test_delete_not_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_dataset_with_inherit_encryption_true_on_a_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_that_the_dataset_created_is_not_encrypted_like_the_parrent(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert 
results.status_code == 200, results.text - assert results.json()['key_format']['value'] is None, results.text - - -def test_delete_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_try_to_create_an_encrypted_dataset_with_pbkdf2itersl_zero(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'pbkdf2iters': 0, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Should be greater or equal than 100000' in results.text, results.text - - -def test_try_to_create_an_encrypted_dataset_with_inherit_encryption_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Must be disabled when encryption is enabled' in results.text, results.text - - -def test_verify_pool_encrypted_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_try_to_create_an_encrypted_dataset_with_passphrase_and_generate_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Must be disabled when dataset is to be encrypted with passphrase' in results.text, results.text - - -def test_create_an_encrypted_root_with_generate_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_generate_key_encrypted_root(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_an_encrypted_root_with_a_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_verify_pool_encrypted_root_dataset_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_make_sure_we_are_not_able_to_lock_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - 
job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - assert 'Only datasets which are encrypted with passphrase can be locked' in job_status['results']['error'], \ - job_status['results']['error'] - - -def test_change_a_key_encrypted_dataset_to_passphrase(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'passphrase': 'my_passphrase' - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_that_the_dataset_changed_to_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_verify_pool_dataset_change_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_lock_passphrase_encrypted_datasets_and_ensure_they_get_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_is_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_encrypted_datasets_with_wrong_passphrase(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'bad_passphrase' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['failed'][dataset]['error'] == 'Invalid Key', str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_still_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = 
job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_encrypted_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset], str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_is_unlocked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_delete_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_delete_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_create_a_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - global pool_id - payload = { - 'name': encrypted_pool_name, - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_pool_passphrase', - }, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_verify_pool_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_pool_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_pool_dataset_is_passphrase_encrypted_and_algorithm_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{encrypted_pool_name}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 
'PASSPHRASE', results.text - assert results.json()['encryption_algorithm']['value'] == 'AES-128-CCM', results.text - - -def test_create_a_passphrase_encrypted_root_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_encrypted_root_dataset_change_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_try_to_change_a_passphrase_encrypted_root_to_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'key': dataset_token_hex, - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - - -def test_verify_pool_dataset_change_key_does_not_leak_passphrase_into_middleware_log_after_key_change(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_delete_encrypted_dataset_from_encrypted_root_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_dataset_to_inherit_encryption_from_the_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_delete_encrypted_dataset_from_the_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_try_to_create_an_encrypted_root_with_generate_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - - -def test_try_to_create_an_encrypted_root_with_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - - -def test_verify_pool_key_encrypted_dataset_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_delete_the_passphrase_encrypted_pool_with_its_datasets(request): - 
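- # A note on the export payload used below (option meanings assumed from - # how this suite exercises them rather than confirmed against API docs): - # 'cascade' also removes objects that reference the pool's datasets - # (e.g. shares), 'restart_services' restarts services that are holding - # them open, and 'destroy' wipes the pool's disks on export.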
depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_creating_a_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - global pool_id - payload = { - 'name': encrypted_pool_name, - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-128-CCM', - 'key': pool_token_hex, - }, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_verify_pool_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{pool_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_pool_dataset_is_hex_key_encrypted_and_algorithm_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{encrypted_pool_name}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - assert results.json()['encryption_algorithm']['value'] == 'AES-128-CCM', results.text - - -def test_creating_a_key_encrypted_root_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_verify_pool_dataset_does_not_leak_encryption_hex_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_change_a_key_encrypted_root_to_passphrase_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'passphrase': 'my_passphrase', - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_pool_encrypted_root_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_dataset_changed_to_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_lock_passphrase_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 'force_umount': True - } - } - 
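- # The lock call below returns a job id rather than a result; wait_on_job - # polls until the job reaches a terminal state. A minimal sketch of the - # idea (assuming a /core/get_jobs endpoint that can be filtered by id): - # - #   import time - # - #   def wait_on_job(job_id, timeout): - #       for _ in range(timeout): - #           job = GET(f'/core/get_jobs/?id={job_id}').json()[0] - #           if job['state'] in ('SUCCESS', 'FAILED', 'ABORTED'): - #               return {'state': job['state'], 'results': job} - #           time.sleep(1) - #       return {'state': 'TIMEOUT', 'results': job}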
results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_the_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_verify_passphrase_encrypted_root_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_key_encrypted_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset], str(job_status['results']) - - -def test_verify_pool_dataset_unlock_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_passphrase_key_encrypted_root_is_unlocked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_delete_passphrase_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_dataset_with_inherit_encryption_from_the_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_inherit_encryption_from_the_key_encrypted_pool_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, 
results.text - - -def test_create_an_encrypted_dataset_with_generate_key_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_delete_generate_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_passphrase_encrypted_root_dataset_parent(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_passphrase_encrypted_root_dataset_parent_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_create_a_passphrase_encrypted_root_child_of_passphrase_parent(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': child_dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase2', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_encrypted_root_child_of_passphrase_parent_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_lock_passphrase_encrypted_root_with_its_child(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_the_parent_encrypted_root_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_parent_encrypted_root_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_verify_the_child_of_the_encrypted_root_parent_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, 
results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_child_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_try_to_unlock_the_child_of_locked_parent_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': child_dataset, - 'passphrase': 'my_passphrase2' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - assert f'{child_dataset} has locked parents' in str(job_status['results']), str(job_status['results']) - assert job_status['results']['result'] is None, str(job_status['results']) - - -def test_verify_child_of_locked_parent_encrypted_root_dataset_unlock_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_child_unlock_successful_is_still_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_parent_dataset_with_child_recursively(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - }, - { - 'name': child_dataset, - 'passphrase': 'my_passphrase2' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset, child_dataset], str(job_status['results']) - - -def test_verify_pool_dataset_unlock_with_child_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is 
False, str(results['output']) - - -def test_verify_the_parent_dataset_unlock_successful_is_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_dataset_is_unlocked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is False, results.text - - -def test_verify_the_child_dataset_unlock_successful_is_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_child_dataset_is_unlocked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is False, results.text - - -def test_delete_dataset_with_its_child_recursive(request): - depends(request, ['CREATED_POOL']) - payload = { - "recursive": True, - } - results = DELETE(f'/pool/dataset/id/{dataset_url}/', payload) - assert results.status_code == 200, results.text - - -def test_creating_a_key_encrypted_dataset_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_encrypted_dataset_on_key_encrypted_pool_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_create_a_passphrase_encrypted_root_from_key_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': child_dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_encrypted_root_from_key_encrypted_root_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def 
test_verify_the_new_passphrase_encrypted_root_is_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_run_inherit_parent_encryption_properties_on_the_passphrase(request): - depends(request, ['CREATED_POOL']) - results = POST('/pool/dataset/inherit_parent_encryption_properties', child_dataset) - assert results.status_code == 200, results.text - - -def test_verify_the_child_got_props_from_the_parent_root(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_the_key_encrypted_pool_with_all_the_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) diff --git a/tests/api2/test_344_acl_templates.py b/tests/api2/test_344_acl_templates.py deleted file mode 100644 index a91f1159a0271..0000000000000 --- a/tests/api2/test_344_acl_templates.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import POST, GET, PUT, DELETE -from auto_config import pool_name - - -@pytest.mark.dependency(name="ACLTEMPLATE_DATASETS_CREATED") -@pytest.mark.parametrize('acltype', ['NFSV4', 'POSIX']) -def test_01_create_test_datasets(request, acltype): - """ - Setup of datasets for testing templates. - This test shouldn't fail unless pool.dataset endpoint is - thoroughly broken. - """ - result = POST( - '/pool/dataset/', { - 'name': f'{pool_name}/acltemplate_{acltype.lower()}', - 'acltype': acltype, - 'aclmode': 'DISCARD' if acltype == 'POSIX' else 'PASSTHROUGH' - } - ) - - assert result.status_code == 200, result.text - - -@pytest.mark.parametrize('acltype', ['NFSV4', 'POSIX']) -def test_02_check_builtin_types_by_path(request, acltype): - """ - This test verifies that we can query builtins by paths, and - that the acltype of the builtins matches that of the - underlying path. 
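- For illustration, each entry returned by /filesystem/acltemplate/by_path - is expected to look roughly like this (shape inferred from the assertions - below, values illustrative): - {'name': 'NFS4_RESTRICTED', 'builtin': True, 'acltype': 'NFS4', - 'acl': [{'tag': 'owner@', 'id': None, 'type': 'ALLOW', 'perms': {...}, 'flags': {...}}, ...]}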
- """ - depends(request, ["ACLTEMPLATE_DATASETS_CREATED"], scope="session") - expected_acltype = 'POSIX1E' if acltype == 'POSIX' else 'NFS4' - payload = { - 'path': f'/mnt/{pool_name}/acltemplate_{acltype.lower()}', - } - results = POST('/filesystem/acltemplate/by_path', payload) - assert results.status_code == 200, results.text - for entry in results.json(): - assert entry['builtin'], results.text - assert entry['acltype'] == expected_acltype, results.text - - payload['format-options'] = { - 'resolve_names': True, - 'ensure_builtins': True - } - - results = POST('/filesystem/acltemplate/by_path', payload) - assert results.status_code == 200, results.text - for entry in results.json(): - for ace in entry['acl']: - if ace['tag'] not in ('USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP'): - continue - - assert ace.get('who') is not None, results.text - - -@pytest.mark.dependency(name="NEW_ACLTEMPLATES_CREATED") -@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX']) -def test_03_create_new_template(request, acltype): - """ - This method queries an existing builtin and creates a - new acltemplate based on the data. Test of new ACL template - insertion. - """ - depends(request, ["ACLTEMPLATE_DATASETS_CREATED"], scope="session") - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['name', '=', f'{acltype}_RESTRICTED']], - 'query-options': {'get': True}, - } - ) - assert results.status_code == 200, results.text - - acl = results.json()['acl'] - for entry in acl: - if entry['id'] is None: - entry['id'] = -1 - - payload = { - 'name': f'{acltype}_TEST', - 'acl': acl, - 'acltype': results.json()['acltype'] - } - - results = POST('/filesystem/acltemplate', payload) - assert results.status_code == 200, results.text - - -def test_04_legacy_check_default_acl_choices(request): - """ - Verify that our new templates appear as choices for "default" ACLs. - """ - depends(request, ["NEW_ACLTEMPLATES_CREATED"], scope="session") - - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['builtin', '=', False]], - } - ) - assert results.status_code == 200, results.text - - names = [x['name'] for x in results.json()] - - results = POST('/filesystem/default_acl_choices') - assert results.status_code == 200, results.text - acl_choices = results.json() - - for name in names: - assert name in acl_choices, results.text - - -@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX']) -def test_05_legacy_check_default_acl_choices_by_path(request, acltype): - """ - Verify that our new templates appear as choices for "default" ACLs - given a path. - """ - depends(request, ["NEW_ACLTEMPLATES_CREATED"], scope="session") - inverse = 'POSIX' if acltype == 'NFS4' else 'NFS4' - - path = f'/mnt/{pool_name}/acltemplate_{"posix" if acltype == "POSIX" else "nfsv4"}' - results = POST('/filesystem/default_acl_choices', payload=path) - assert results.status_code == 200, results.text - - choices = results.json() - assert f'{acltype}_TEST' in choices, results.text - assert f'{inverse}_TEST' not in choices, results.text - - -@pytest.mark.dependency(name="NEW_ACLTEMPLATES_UPDATED") -@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX']) -def test_09_update_new_template(request, acltype): - """ - Rename the template we created to validated that `update` - method works. 
- """ - depends(request, ["NEW_ACLTEMPLATES_CREATED"], scope="session") - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['name', '=', f'{acltype}_TEST']], - 'query-options': {'get': True}, - } - ) - - assert results.status_code == 200, results.text - - payload = results.json() - id = payload.pop('id') - payload.pop('builtin') - payload['name'] = f'{payload["name"]}2' - - results = PUT(f'/filesystem/acltemplate/id/{id}/', payload) - assert results.status_code == 200, results.text - - -@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX']) -def test_10_delete_new_template(request, acltype): - depends(request, ["NEW_ACLTEMPLATES_UPDATED"], scope="session") - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['name', '=', f'{acltype}_TEST2']], - 'query-options': {'get': True}, - } - ) - assert results.status_code == 200, results.text - - results = DELETE(f'/filesystem/acltemplate/id/{results.json()["id"]}') - assert results.status_code == 200, results.text - - -def test_40_knownfail_builtin_delete(request): - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['builtin', '=', True]], - 'query-options': {'get': True}, - } - ) - assert results.status_code == 200, results.text - id = results.json()['id'] - - results = DELETE(f'/filesystem/acltemplate/id/{id}') - assert results.status_code == 422, results.text - - -def test_41_knownfail_builtin_update(request): - results = GET( - '/filesystem/acltemplate', payload={ - 'query-filters': [['builtin', '=', True]], - 'query-options': {'get': True}, - } - ) - assert results.status_code == 200, results.text - payload = results.json() - id = payload.pop('id') - payload.pop('builtin') - payload['name'] = 'CANARY' - - results = PUT(f'/filesystem/acltemplate/id/{id}/', payload) - assert results.status_code == 422, results.text - - -@pytest.mark.parametrize('acltype', ['NFSV4', 'POSIX']) -def test_50_delete_test1_dataset(request, acltype): - depends(request, ["ACLTEMPLATE_DATASETS_CREATED"], scope="session") - dataset_name = f'{pool_name}/acltemplate_{acltype.lower()}' - results = DELETE(f'/pool/dataset/id/{dataset_name.replace("/", "%2F")}/') - assert results.status_code == 200, results.text diff --git a/tests/api2/test_345_acl_nfs4.py b/tests/api2/test_345_acl_nfs4.py deleted file mode 100644 index f38e3ab93c7f2..0000000000000 --- a/tests/api2/test_345_acl_nfs4.py +++ /dev/null @@ -1,946 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import secrets -import string -import sys -import os -import pytest -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST, wait_on_job -from auto_config import pool_name -from pytest_dependency import depends -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import user as create_user -from middlewared.test.integration.assets.pool import dataset as make_dataset -from middlewared.test.integration.utils import call, ssh - - -shell = '/usr/bin/bash' -group = 'nogroup' -ACLTEST_DATASET_NAME = 'acltest' -ACLTEST_DATASET = f'{pool_name}/{ACLTEST_DATASET_NAME}' -dataset_url = ACLTEST_DATASET.replace('/', '%2F') - -ACLTEST_SUBDATASET = f'{pool_name}/acltest/sub1' -subdataset_url = ACLTEST_SUBDATASET.replace('/', '%2F') -getfaclcmd = "nfs4xdr_getfacl" -setfaclcmd = "nfs4xdr_setfacl" -group0 = "root" - -ACL_USER = 'acluser' -ACL_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - -base_permset = { - "READ_DATA": 
False, - "WRITE_DATA": False, - "APPEND_DATA": False, - "READ_NAMED_ATTRS": False, - "WRITE_NAMED_ATTRS": False, - "EXECUTE": False, - "DELETE_CHILD": False, - "READ_ATTRIBUTES": False, - "WRITE_ATTRIBUTES": False, - "DELETE": False, - "READ_ACL": False, - "WRITE_ACL": False, - "WRITE_OWNER": False, - "SYNCHRONIZE": True -} - -base_flagset = { - "FILE_INHERIT": False, - "DIRECTORY_INHERIT": False, - "NO_PROPAGATE_INHERIT": False, - "INHERIT_ONLY": False, - "INHERITED": False -} - -BASIC_PERMS = ["READ", "TRAVERSE", "MODIFY", "FULL_CONTROL"] -BASIC_FLAGS = ["INHERIT", "NOINHERIT"] -TEST_FLAGS = [ - 'DIRECTORY_INHERIT', - 'FILE_INHERIT', - 'INHERIT_ONLY', - 'NO_PROPAGATE_INHERIT' -] - -INHERIT_FLAGS_BASIC = { - "FILE_INHERIT": True, - "DIRECTORY_INHERIT": True, - "NO_PROPAGATE_INHERIT": False, - "INHERIT_ONLY": False, - "INHERITED": False -} - -INHERIT_FLAGS_ADVANCED = { - "FILE_INHERIT": True, - "DIRECTORY_INHERIT": True, - "NO_PROPAGATE_INHERIT": True, - "INHERIT_ONLY": True, - "INHERITED": False -} - -default_acl = [ - { - "tag": "owner@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - }, - { - "tag": "group@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - } -] - -function_testing_acl_deny = [ - { - "tag": "owner@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - }, - { - "tag": "group@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - }, - { - "tag": "everyone@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - }, -] - -function_testing_acl_allow = [ - { - "tag": "owner@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - }, - { - "tag": "group@", - "id": None, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - } -] - -# base64-encoded samba DOSATTRIB xattr -DOSATTRIB_XATTR = "CTB4MTAAAAMAAwAAABEAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABimX3sSqfTAQAAAAAAAAAACg==" - -IMPLEMENTED_DENY = [ - "WRITE_ATTRIBUTES", - "DELETE", - "DELETE_CHILD", - "FULL_DELETE", - "EXECUTE", - "READ_DATA", - "WRITE_DATA", - "READ_ACL", - "WRITE_ACL", - "WRITE_OWNER", -] - -IMPLEMENTED_ALLOW = [ - "READ_DATA", - "WRITE_DATA", - "DELETE", - "DELETE_CHILD", - "EXECUTE", - "WRITE_OWNER", - "READ_ACL", - "WRITE_ACL", -] - -TEST_INFO = {} - - -@pytest.fixture(scope='module') -def initialize_for_acl_tests(request): - with make_dataset(ACLTEST_DATASET_NAME, data={'acltype': 'NFSV4', 'aclmode': 'RESTRICTED'}) as ds: - with create_user({ - 'username': ACL_USER, - 'full_name': ACL_USER, - 'group_create': True, - 'ssh_password_enabled': True, - 'password': ACL_PWD - }) as u: - TEST_INFO.update({ - 'dataset': ds, - 'dataset_path': os.path.join('/mnt', ds), - 'user': u - }) - yield request - - -@pytest.mark.dependency(name='HAS_NFS4_ACLS') -def test_02_create_dataset(initialize_for_acl_tests): - acl = call('filesystem.getacl', TEST_INFO['dataset_path']) - assert acl['acltype'] == 'NFS4' - - -def test_04_basic_set_acl_for_dataset(request): - depends(request, ["HAS_NFS4_ACLS"]) - call('pool.dataset.permission', TEST_INFO['dataset'], { - 'acl': default_acl, - 'group': group, - 'user': 'nobody' - }, job=True) - - acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True) - for key in ['tag', 'type', 'perms', 'flags']: - assert 
acl_result['acl'][0][key] == default_acl[0][key], str(acl_result) - assert acl_result['acl'][1][key] == default_acl[1][key], str(acl_result) - - assert acl_result['uid'] == 65534, str(acl_result) - - -""" -At this point very basic functionality of API endpoint is verified. -Proceed to more rigorous testing of basic and advanced permissions. -These tests will only manipulate the first entry in the default ACL (owner@). -Each test will iterate through all available options for that particular -variation (BASIC/ADVANCED permissions, BASIC/ADVANCED flags). -""" - - -@pytest.mark.parametrize('permset', BASIC_PERMS) -def test_08_set_basic_permsets(request, permset): - depends(request, ["HAS_NFS4_ACLS"]) - default_acl[0]['perms']['BASIC'] = permset - - call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True) - acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True) - requested_perms = default_acl[0]['perms'] - received_perms = acl_result['acl'][0]['perms'] - assert requested_perms == received_perms, str(acl_result) - - -@pytest.mark.parametrize('flagset', BASIC_FLAGS) -def test_09_set_basic_flagsets(request, flagset): - depends(request, ["HAS_NFS4_ACLS"]) - default_acl[0]['flags']['BASIC'] = flagset - - call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True) - acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True) - requested_flags = default_acl[0]['flags'] - received_flags = acl_result['acl'][0]['flags'] - assert requested_flags == received_flags, str(acl_result) - - -@pytest.mark.parametrize('perm', base_permset.keys()) -def test_10_set_advanced_permset(request, perm): - depends(request, ["HAS_NFS4_ACLS"]) - for key in ['perms', 'flags']: - if default_acl[0][key].get('BASIC'): - default_acl[0][key].pop('BASIC') - - default_acl[0]['flags'] = base_flagset.copy() - default_acl[0]['perms'] = base_permset.copy() - default_acl[0]['perms'][perm] = True - - call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True) - acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True) - requested_perms = default_acl[0]['perms'] - received_perms = acl_result['acl'][0]['perms'] - assert requested_perms == received_perms, str(acl_result) - - -@pytest.mark.parametrize('flag', TEST_FLAGS) -def test_11_set_advanced_flagset(request, flag): - depends(request, ["HAS_NFS4_ACLS"]) - default_acl[0]['flags'] = base_flagset.copy() - default_acl[0]['flags'][flag] = True - if flag in ['INHERIT_ONLY', 'NO_PROPAGATE_INHERIT']: - default_acl[0]['flags']['DIRECTORY_INHERIT'] = True - - call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True) - acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True) - requested_flags = default_acl[0]['flags'] - received_flags = acl_result['acl'][0]['flags'] - assert requested_flags == received_flags, str(acl_result) - - -""" -This next series of tests verifies that ACLs are being inherited correctly. -We first create a child dataset to verify that ACLs do not change unless -'traverse' is set. 
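- As a sketch of the two options exercised below (semantics inferred from - these tests): 'recursive' applies the ACL to files and directories under - the path but stops at dataset boundaries, while adding 'traverse' - descends into child datasets as well: - call('filesystem.setacl', {'path': path, 'dacl': acl, - 'options': {'recursive': True, 'traverse': True}}, job=True)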
-""" - - -def test_12_prepare_recursive_tests(request): - depends(request, ["HAS_NFS4_ACLS"], scope="session") - call('pool.dataset.create', {'name': ACLTEST_SUBDATASET, 'acltype': 'NFSV4'}) - - ssh(';'.join([ - f'mkdir -p /mnt/{ACLTEST_DATASET}/dir1/dir2', - f'touch /mnt/{ACLTEST_DATASET}/dir1/testfile', - f'touch /mnt/{ACLTEST_DATASET}/dir1/dir2/testfile' - ])) - - -def test_13_recursive_no_traverse(request): - depends(request, ["HAS_NFS4_ACLS"]) - default_acl[1]['perms'].pop('BASIC') - default_acl[1]['flags'].pop('BASIC') - default_acl[0]['flags'] = INHERIT_FLAGS_BASIC.copy() - default_acl[1]['flags'] = INHERIT_FLAGS_ADVANCED.copy() - - expected_flags_0 = INHERIT_FLAGS_BASIC.copy() - expected_flags_0['INHERITED'] = True - expected_flags_1 = base_flagset.copy() - expected_flags_1['INHERITED'] = True - - # get acl of child dataset. This should not change in this test - acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True) - init_acl = acl_result['acl'][0]['perms'] - - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': default_acl, - 'uid': 65534, - 'options': {'recursive': True} - }, job=True) - - # Verify that it hasn't changed - acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True) - fin_acl = acl_result['acl'][0]['perms'] - assert init_acl == fin_acl, str(acl_result) - - # check on dir 1. Entry 1 should have INHERIT flag added, and - # INHERIT_ONLY should be set to False at this depth. - acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_DATASET}/dir1', False) - theacl = acl_result['acl'] - assert theacl[0]['flags'] == expected_flags_0, acl_result - assert theacl[1]['flags'] == expected_flags_1, acl_result - - # Verify that user was changed on subdirectory - assert acl_result['uid'] == 65534, acl_result - - # check on dir 2 - the no propogate inherit flag should have taken - # effect and ACL length should be 1 - acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_DATASET}/dir1/dir2', False) - theacl = acl_result['acl'] - assert theacl[0]['flags'] == expected_flags_0, acl_result - assert len(theacl) == 1, acl_result - - # Verify that user was changed two deep - assert acl_result['uid'] == 65534, acl_result - - -def test_14_recursive_with_traverse(request): - depends(request, ["HAS_NFS4_ACLS"]) - expected_flags_0 = INHERIT_FLAGS_BASIC.copy() - expected_flags_0['INHERITED'] = True - expected_flags_1 = base_flagset.copy() - expected_flags_1['INHERITED'] = True - - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': default_acl, - 'uid': 65534, - 'options': {'recursive': True, 'traverse': True} - }, job=True) - - acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True) - theacl = acl_result['acl'] - assert theacl[0]['flags'] == expected_flags_0, acl_result - assert theacl[1]['flags'] == expected_flags_1, acl_result - - # Verify that user was changed - assert acl_result['uid'] == 65534, acl_result - - -def test_15_strip_acl_from_dataset(request): - depends(request, ["HAS_NFS4_ACLS"]) - call('filesystem.setperm', { - 'path': TEST_INFO['dataset_path'], - 'mode': '777', - 'uid': 65534, - 'options': {'stripacl': True, 'recursive': True} - }, job=True) - - assert call('filesystem.stat', f'/mnt/{ACLTEST_SUBDATASET}')['acl'] is True - - st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}') - assert st['acl'] is False, str(st) - assert oct(st['mode']) == '0o40777', str(st) - - st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}/dir1') - assert st['acl'] is False, str(st) - assert 
oct(st['mode']) == '0o40777', str(st) - - st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}/dir1/testfile') - assert st['acl'] is False, str(st) - assert oct(st['mode']) == '0o100777', str(st) - - -def test_20_delete_child_dataset(request): - depends(request, ["HAS_NFS4_ACLS"]) - result = DELETE( - f'/pool/dataset/id/{subdataset_url}/' - ) - assert result.status_code == 200, result.text - - -@pytest.mark.dependency(name="HAS_TESTFILE") -def test_22_prep_testfile(request): - depends(request, ["HAS_NFS4_ACLS"], scope="session") - ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt') - - -""" -The following tests verify that DENY ACEs are functioning correctly. -A deny ACE will be prepended to a base ACL that grants FULL_CONTROL. - -#define VREAD_NAMED_ATTRS 000000200000 /* not used */ -#define VWRITE_NAMED_ATTRS 000000400000 /* not used */ -#define VDELETE_CHILD 000001000000 -#define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */ -#define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */ -#define VDELETE 000010000000 -#define VREAD_ACL 000020000000 /* read ACL and file mode */ -#define VWRITE_ACL 000040000000 /* change ACL and/or file mode */ -#define VWRITE_OWNER 000100000000 /* change file owner */ -#define VSYNCHRONIZE 000200000000 /* not used */ - -Some tests must be skipped due to lack of implementation in VFS. -""" - - -@pytest.mark.parametrize('perm', IMPLEMENTED_DENY) -def test_23_test_acl_function_deny(perm, request): - """ - Iterate through available permissions and prepend - deny ACE denying that particular permission to the - acltest user, then attempt to perform an action that - should result in failure. - """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - - if perm == "FULL_DELETE": - to_deny = {"DELETE_CHILD": True, "DELETE": True} - else: - to_deny = {perm: True} - - payload_acl = [{ - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "DENY", - "perms": to_deny, - "flags": {"BASIC": "INHERIT"} - }] - - payload_acl.extend(function_testing_acl_deny) - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 0, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - if perm == "EXECUTE": - cmd = f'cd /mnt/{ACLTEST_DATASET}' - - elif perm == "READ_ATTRIBUTES": - cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]: - cmd = f'rm -f /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_DATA": - cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_DATA": - cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ATTRIBUTES": - cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_ACL": - cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ACL": - cmd = f'{setfaclcmd} -b /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_OWNER": - cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt' - - else: - # This should never happen. - cmd = "touch /var/empty/ERROR" - - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - """ - Per RFC5661 Section 6.2.1.3.2, deletion is permitted if either - DELETE_CHILD is permitted on parent, or DELETE is permitted on - file. This means that it should succeed when tested in isolation, - but fail when combined. - - Unfortunately, this is implemented differently in FreeBSD vs Linux. 
- The former follows the above recommendation; the latter does not, in that - denial of DELETE on the file takes precedence over an allow of DELETE_CHILD. - """ - errstr = f'cmd: {cmd}, res: {results["output"]}, to_deny {to_deny}' - expected_delete = ["DELETE_CHILD"] - if perm in expected_delete: - assert results['result'] is True, errstr - - # unfortunately, we now need to recreate our testfile. - ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt') - elif perm == "READ_ATTRIBUTES": - assert results['result'] is True, errstr - else: - assert results['result'] is False, errstr - - -@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW) -def test_24_test_acl_function_allow(perm, request): - """ - Iterate through available permissions and prepend - allow ACE permitting that particular permission to the - acltest user, then attempt to perform an action that - should result in success. - """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - - """ - Some extra permissions bits must be set for these tests - EXECUTE so that we can traverse to the path in question - and READ_ATTRIBUTES because most of the utilities we use - for testing have to stat(2) the files. - """ - to_allow = {perm: True} - if perm != "EXECUTE": - to_allow["EXECUTE"] = True - - if perm != "READ_ATTRIBUTES": - to_allow["READ_ATTRIBUTES"] = True - - if perm == "WRITE_ACL": - to_allow["READ_ACL"] = True - - payload_acl = [{ - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": to_allow, - "flags": {"BASIC": "INHERIT"} - }] - payload_acl.extend(function_testing_acl_allow) - - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 65534, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - if perm == "EXECUTE": - cmd = f'cd /mnt/{ACLTEST_DATASET}' - - elif perm == "READ_ATTRIBUTES": - cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]: - cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_DATA": - cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_DATA": - cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ATTRIBUTES": - cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_ACL": - cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ACL": - cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_OWNER": - cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt' - - else: - # This should never happen. - cmd = "touch /var/empty/ERROR" - - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is True, errstr - if perm in ["DELETE", "DELETE_CHILD"]: - # unfortunately, we now need to recreate our testfile. - ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt') - - -@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW) -def test_25_test_acl_function_omit(perm, request): - """ - Iterate through available permissions and add permissions - required for an explicit ALLOW of that ACE from the previous - test to succeed. This sets the stage to have success hinge - on presence of the particular permissions bit. Then we omit - it. This should result in a failure. 
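- For example, when perm is WRITE_DATA the user's ACE below grants only - EXECUTE and READ_ATTRIBUTES, so appending to the test file over SSH - (echo -n "CAT" >> acltest.txt) as the acltest user is expected to fail.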
- """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - - """ - Some extra permissions bits must be set for these tests - EXECUTE so that we can traverse to the path in question - and READ_ATTRIBUTES because most of the utilites we use - for testing have to stat(2) the files. - """ - to_allow = {} - if perm != "EXECUTE": - to_allow["EXECUTE"] = True - - if perm != "READ_ATTRIBUTES": - to_allow["READ_ATTRIBUTES"] = True - - if perm == "WRITE_ACL": - to_allow["READ_ACL"] = True - - payload_acl = [{ - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": to_allow, - "flags": {"BASIC": "INHERIT"} - }] - - payload_acl.extend(function_testing_acl_allow) - - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 65534, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - if perm == "EXECUTE": - cmd = f'cd /mnt/{ACLTEST_DATASET}' - - elif perm == "READ_ATTRIBUTES": - cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]: - cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_DATA": - cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_DATA": - cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ATTRIBUTES": - cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "READ_ACL": - cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_ACL": - cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt' - - elif perm == "WRITE_OWNER": - cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt' - - else: - # This should never happen. - cmd = "touch /var/empty/ERROR" - - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - -@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW) -def test_25_test_acl_function_allow_restrict(perm, request): - """ - Iterate through implemented allow permissions and verify that - they grant no more permissions than intended. Some bits cannot - be tested in isolation effectively using built in utilities. - """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - - """ - Some extra permissions bits must be set for these tests - EXECUTE so that we can traverse to the path in question - and READ_ATTRIBUTES because most of the utilites we use - for testing have to stat(2) the files. 
- """ - to_allow = {} - tests_to_skip = [] - tests_to_skip.append(perm) - - if perm != "EXECUTE": - to_allow["EXECUTE"] = True - tests_to_skip.append("EXECUTE") - - if perm != "READ_ATTRIBUTES": - to_allow["READ_ATTRIBUTES"] = True - tests_to_skip.append("READ_ATTRIBUTES") - - if perm == "DELETE_CHILD": - tests_to_skip.append("DELETE") - - payload_acl = [{ - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": to_allow, - "flags": {"BASIC": "INHERIT"} - }] - payload_acl.extend(function_testing_acl_allow) - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 65534, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - if "EXECUTE" not in tests_to_skip: - cmd = f'cd /mnt/{ACLTEST_DATASET}' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "DELETE" not in tests_to_skip: - cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - if results['result'] is True: - # File must be re-created. Kernel ACL inheritance routine - # will ensure that new file has right ACL. - ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt') - - if "READ_DATA" not in tests_to_skip: - cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "WRITE_DATA" not in tests_to_skip: - cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "WRITE_ATTRIBUTES" not in tests_to_skip: - cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "READ_ACL" not in tests_to_skip: - cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "WRITE_ACL" not in tests_to_skip: - cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - if "WRITE_OWNER" not in tests_to_skip: - cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}' - assert results['result'] is False, errstr - - -def test_26_file_execute_deny(request): - """ - Base permset with everyone@ FULL_CONTROL, but ace added on - top explictly denying EXECUTE. Attempt to execute file should fail. 
- """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - payload_acl = [ - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "DENY", - "perms": {"EXECUTE": True}, - "flags": {"FILE_INHERIT": True} - }, - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": {"EXECUTE": True}, - "flags": {"BASIC": "NOINHERIT"} - }, - ] - payload_acl.extend(function_testing_acl_deny) - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 0, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt') - - cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}' - assert results['result'] is False, errstr - - -def test_27_file_execute_allow(request): - """ - Verify that setting execute allows file execution. READ_DATA and - READ_ATTRIBUTES are also granted beecause we need to be able to - stat and read our test script. - """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - payload_acl = [ - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": { - "EXECUTE": True, - "READ_DATA": True, - "READ_ATTRIBUTES": True - }, - "flags": {"FILE_INHERIT": True} - }, - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": {"EXECUTE": True}, - "flags": {"BASIC": "NOINHERIT"} - }, - ] - payload_acl.extend(function_testing_acl_allow) - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 0, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt') - - cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}' - assert results['result'] is True, errstr - - -def test_28_file_execute_omit(request): - """ - Grant user all permissions except EXECUTE. Attempt to execute - file should fail. - """ - depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session") - payload_acl = [ - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": base_permset.copy(), - "flags": {"FILE_INHERIT": True} - }, - { - "tag": "USER", - "id": TEST_INFO['user']['uid'], - "type": "ALLOW", - "perms": {"EXECUTE": True}, - "flags": {"BASIC": "NOINHERIT"} - }, - ] - payload_acl.extend(function_testing_acl_allow) - # at this point the user's ACE has all perms set - # remove execute. 
- payload_acl[0]['perms']['EXECUTE'] = False - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 0, 'uid': 0, - 'options': {'recursive': True}, - }, job=True) - - ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt') - - cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt' - results = SSH_TEST(cmd, ACL_USER, ACL_PWD) - errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}' - assert results['result'] is False, errstr - - -def test_29_owner_restrictions(request): - depends(request, ["HAS_NFS4_ACLS"], scope="session") - - payload_acl = [{ - "tag": "owner@", - "id": -1, - "type": "ALLOW", - "perms": {"BASIC": "READ"}, - "flags": {"BASIC": "INHERIT"} - }] - call('filesystem.setacl', { - 'path': TEST_INFO['dataset_path'], - 'dacl': payload_acl, - 'gid': 0, 'uid': TEST_INFO['user']['uid'], - 'options': {'recursive': True}, - }, job=True) - - results = ssh( - f'mkdir /mnt/{ACLTEST_DATASET}/dir1/dir_should_not_exist', - complete_response=True, check=False, - user=ACL_USER, password=ACL_PWD - ) - - assert results['result'] is False, str(results) - - results = ssh( - f'touch /mnt/{ACLTEST_DATASET}/dir1/file_should_not_exist', - complete_response=True, check=False, - user=ACL_USER, password=ACL_PWD - ) - - assert results['result'] is False, str(results) - - -def test_30_acl_inherit_nested_dataset(): - with make_dataset("acl_test_inherit1", data={'share_type': 'SMB'}) as ds1: - call('filesystem.add_to_acl', { - 'path': os.path.join('/mnt', ds1), - 'entries': [{'id_type': 'GROUP', 'id': 666, 'access': 'READ'}] - }, job=True) - - acl1 = call('filesystem.getacl', os.path.join('/mnt', ds1)) - assert any(x['id'] == 666 for x in acl1['acl']) - - with pytest.raises(ValidationErrors): - # ACL on parent dataset prevents adding APPS group to ACL. Fail. 
- with make_dataset("acl_test_inherit1/acl_test_inherit2", data={'share_type': 'APPS'}): - pass - - with make_dataset("acl_test_inherit1/acl_test_inherit2", data={'share_type': 'NFS'}) as ds2: - acl2 = call('filesystem.getacl', os.path.join('/mnt', ds2)) - assert acl1['acltype'] == acl2['acltype'] - assert any(x['id'] == 666 for x in acl2['acl']) diff --git a/tests/api2/test_347_posix_mode.py b/tests/api2/test_347_posix_mode.py deleted file mode 100644 index c7fb7cce2b72e..0000000000000 --- a/tests/api2/test_347_posix_mode.py +++ /dev/null @@ -1,656 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import sys -import os -import pytest -import stat -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST, wait_on_job -from auto_config import pool_name, user, password -from pytest_dependency import depends - -MODE_DATASET = f'{pool_name}/modetest' -dataset_url = MODE_DATASET.replace('/', '%2F') - -MODE_SUBDATASET = f'{pool_name}/modetest/sub1' -subdataset_url = MODE_SUBDATASET.replace('/', '%2F') - -OWNER_BITS = { - "OWNER_READ": stat.S_IRUSR, - "OWNER_WRITE": stat.S_IWUSR, - "OWNER_EXECUTE": stat.S_IXUSR, -} - -GROUP_BITS = { - "GROUP_READ": stat.S_IRGRP, - "GROUP_WRITE": stat.S_IWGRP, - "GROUP_EXECUTE": stat.S_IXGRP, -} - -OTHER_BITS = { - "OTHER_READ": stat.S_IROTH, - "OTHER_WRITE": stat.S_IWOTH, - "OTHER_EXECUTE": stat.S_IXOTH, -} - -MODE = {**OWNER_BITS, **GROUP_BITS, **OTHER_BITS} - -MODE_USER = "modetesting" -MODE_GROUP = "modetestgrp" -MODE_PWD = "modetesting" - - -def test_01_check_dataset_endpoint(): - assert isinstance(GET('/pool/dataset/').json(), list) - - -@pytest.mark.dependency(name="DATASET_CREATED") -def test_02_create_dataset(request): - result = POST( - '/pool/dataset/', { - 'name': MODE_DATASET - } - ) - assert result.status_code == 200, result.text - - -@pytest.mark.dependency(name="IS_TRIVIAL") -def test_03_verify_acl_is_trivial(request): - depends(request, ["DATASET_CREATED"]) - results = POST('/filesystem/stat/', f'/mnt/{MODE_DATASET}') - assert results.status_code == 200, results.text - assert results.json()['acl'] is False, results.text - - -@pytest.mark.parametrize('mode_bit', MODE.keys()) -def test_04_verify_setting_mode_bits_nonrecursive(request, mode_bit): - """ - This test iterates through possible POSIX permissions bits and - verifies that they are properly set on the remote server. 
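
# Aside on the MODE table defined above: each named bit is a single stat
# constant, and the tests format it with f"{...:03o}" to get the
# three-digit octal string the permission endpoint expects. A quick
# standard-library illustration (not part of the deleted file):

    import stat

    assert f"{stat.S_IRUSR:03o}" == "400"   # owner read
    assert f"{stat.S_IWGRP:03o}" == "020"   # group write
    assert f"{stat.S_IXOTH:03o}" == "001"   # other execute
    # S_IMODE() strips the file-type bits from a raw st_mode, which is
    # how these tests compare the server-reported mode against new_mode.
    assert f"{stat.S_IMODE(0o100644):03o}" == "644"
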
- """ - depends(request, ["IS_TRIVIAL"]) - new_mode = f"{MODE[mode_bit]:03o}" - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': new_mode, - 'group': 'nogroup', - 'user': 'nobody' - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - results = POST('/filesystem/stat/', f'/mnt/{MODE_DATASET}') - assert results.status_code == 200, results.text - server_mode = f"{stat.S_IMODE(results.json()['mode']):03o}" - assert new_mode == server_mode, results.text - - -@pytest.mark.dependency(name="RECURSIVE_PREPARED") -def test_05_prepare_recursive_tests(request): - depends(request, ["IS_TRIVIAL"], scope="session") - result = POST( - '/pool/dataset/', { - 'name': MODE_SUBDATASET - } - ) - assert result.status_code == 200, result.text - - cmd = f'mkdir -p /mnt/{MODE_DATASET}/dir1/dir2' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - cmd = f'touch /mnt/{MODE_DATASET}/dir1/dir2/testfile' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - results = POST('/filesystem/stat/', f'/mnt/{MODE_SUBDATASET}') - assert results.status_code == 200, results.text - current_mode = results.json()['mode'] - # new datasets should be created with 755 permissions" - assert f"{stat.S_IMODE(current_mode):03o}" == "755", results.text - - -@pytest.mark.parametrize('mode_bit', MODE.keys()) -def test_06_verify_setting_mode_bits_recursive_no_traverse(request, mode_bit): - """ - Perform recursive permissions change and verify new mode written - to files and subdirectories. - """ - depends(request, ["RECURSIVE_PREPARED"]) - new_mode = f"{MODE[mode_bit]:03o}" - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': new_mode, - 'group': 'nogroup', - 'user': 'nobody', - 'options': {'recursive': True} - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - results = POST('/filesystem/stat/', f'/mnt/{MODE_DATASET}') - assert results.status_code == 200, results.text - server_mode = f"{stat.S_IMODE(results.json()['mode']):03o}" - assert new_mode == server_mode, results.text - - results = POST('/filesystem/stat/', f'/mnt/{MODE_DATASET}/dir1/dir2') - assert results.status_code == 200, results.text - server_mode = f"{stat.S_IMODE(results.json()['mode']):03o}" - assert new_mode == server_mode, results.text - - results = POST('/filesystem/stat/', - f'/mnt/{MODE_DATASET}/dir1/dir2/testfile') - assert results.status_code == 200, results.text - server_mode = f"{stat.S_IMODE(results.json()['mode']):03o}" - assert new_mode == server_mode, results.text - - -def test_07_verify_mode_not_set_on_child_dataset(request): - depends(request, ["RECURSIVE_PREPARED"]) - results = POST('/filesystem/stat/', f'/mnt/{MODE_SUBDATASET}') - assert results.status_code == 200, results.text - current_mode = results.json()['mode'] - # new datasets should be created with 755 permissions" - assert f"{stat.S_IMODE(current_mode):03o}" == "755", results.text - - -def test_08_verify_traverse_to_child_dataset(request): - depends(request, ["RECURSIVE_PREPARED"]) - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': 777, - 'group': 'nogroup', - 'user': 'nobody', - 'options': {'recursive': True, 
'traverse': True} - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - results = POST('/filesystem/stat/', f'/mnt/{MODE_SUBDATASET}') - assert results.status_code == 200, results.text - current_mode = results.json()['mode'] - assert f"{stat.S_IMODE(current_mode):03o}" == "777", results.text - - -""" -Create user and group for testing function of POSIX permission bits. -""" - - -@pytest.mark.dependency(name="GROUP_CREATED") -def test_09_create_test_group(request): - depends(request, ["IS_TRIVIAL"]) - global next_gid - global groupid - results = GET('/group/get_next_gid/') - assert results.status_code == 200, results.text - next_gid = results.json() - global groupid - payload = { - "gid": next_gid, - "name": MODE_GROUP, - } - results = POST("/group/", payload) - assert results.status_code == 200, results.text - groupid = results.json() - - -@pytest.mark.dependency(name="USER_CREATED") -def test_10_creating_shareuser_to_test_acls(request): - depends(request, ["GROUP_CREATED"]) - global modeuser_id - global next_uid - results = GET('/user/get_next_uid/') - assert results.status_code == 200, results.text - next_uid = results.json() - payload = { - "username": MODE_USER, - "full_name": "Mode User", - "group_create": True, - "password": MODE_PWD, - "uid": next_uid, - "groups": [groupid], - "shell": '/usr/bin/bash', - "ssh_password_enabled": True, - } - results = POST("/user/", payload) - assert results.status_code == 200, results.text - modeuser_id = results.json() - - -""" -Next series of tests are for correct behavior of POSIX permissions -""" - - -def dir_mode_check(mode_bit): - if mode_bit.endswith("READ"): - cmd = f'ls /mnt/{MODE_DATASET}' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is True, results['output'] - - cmd = f'touch /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - cmd = f'cd /mnt/{MODE_DATASET}' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - elif mode_bit.endswith("WRITE"): - cmd = f'ls /mnt/{MODE_DATASET}' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - cmd = f'touch /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is True, results['output'] - - cmd = f'rm /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is True, results['output'] - - elif mode_bit.endswith("EXECUTE"): - cmd = f'ls /mnt/{MODE_DATASET}' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - # Ensure that file is deleted before trying to create - cmd = f'rm /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, user, password) - - cmd = f'touch /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - -def file_mode_check(mode_bit): - if mode_bit.endswith("READ"): - cmd = f'cat /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is True, results['output'] - assert results['stdout'].strip() == "echo CANARY", results['output'] - - cmd = f'echo "FAIL" >> /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, 
results['output'] - - cmd = f'/mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - elif mode_bit.endswith("WRITE"): - cmd = f'cat /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is True, results['output'] - - cmd = f'/mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - """ - Parent directory does not have write bit set. This - means rm should fail even though WRITE is set for user. - """ - cmd = f'rm /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - cmd = f'echo "echo CANARY" > /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - elif mode_bit.endswith("EXECUTE"): - cmd = f'cat /mnt/{MODE_DATASET}' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - cmd = f'echo "FAIL" > /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - -def file_mode_check_xor(mode_bit): - """ - when this method is called, all permissions bits are set except for - the one being tested. - """ - if mode_bit.endswith("READ"): - cmd = f'cat /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - elif mode_bit.endswith("WRITE"): - cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - elif mode_bit.endswith("EXECUTE"): - cmd = f'/mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, MODE_USER, MODE_PWD) - assert results['result'] is False, results['output'] - - -@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys()) -def test_11_test_directory_owner_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - In case of directory, Execute must be set concurrently with write - in order to verify correct write behavior. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - if new_mode == stat.S_IWUSR: - new_mode |= stat.S_IXUSR - - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': f'{new_mode:03o}', - 'group': 'nogroup', - 'user': MODE_USER - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - dir_mode_check(mode_bit) - - -@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys()) -def test_12_test_directory_group_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - In case of directory, Execute must be set concurrently with write - in order to verify correct write behavior. 
- """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - if new_mode == stat.S_IWGRP: - new_mode |= stat.S_IXGRP - - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': f'{new_mode:03o}', - 'group': MODE_GROUP, - 'user': 'root' - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - dir_mode_check(mode_bit) - - -@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys()) -def test_13_test_directory_other_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - In case of directory, Execute must be set concurrently with write - in order to verify correct write behavior. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - if new_mode == stat.S_IWOTH: - new_mode |= stat.S_IXOTH - - result = POST( - f'/pool/dataset/id/{dataset_url}/permission/', { - 'acl': [], - 'mode': f'{new_mode:03o}', - 'group': 'root', - 'user': 'root' - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - dir_mode_check(mode_bit) - - -def test_14_setup_file_test(request): - depends(request, ["USER_CREATED"], scope="session") - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}', - 'mode': "001", - 'gid': 0, - 'uid': 0, - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - cmd = f'echo "echo CANARY" > /mnt/{MODE_DATASET}/canary' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys()) -def test_15_test_file_owner_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': 0, - 'uid': next_uid - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check(mode_bit) - - -@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys()) -def test_16_test_file_group_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. 
- """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': next_gid, - 'uid': 0, - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check(mode_bit) - - -@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys()) -def test_17_test_file_other_bits_function_allow(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': 0, - 'uid': 0, - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check(mode_bit) - - -@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys()) -def test_18_test_file_owner_bits_xor(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO - new_mode = new_mode ^ MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': 0, - 'uid': next_uid - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check_xor(mode_bit) - - -@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys()) -def test_19_test_file_group_bits_xor(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. - """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO - new_mode = new_mode ^ MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': next_gid, - 'uid': 0 - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check_xor(mode_bit) - - -@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys()) -def test_20_test_file_other_bits_xor(mode_bit, request): - """ - Verify mode behavior correct when it's the only bit set. 
- """ - depends(request, ["USER_CREATED"], scope="session") - new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO - new_mode = new_mode ^ MODE[mode_bit] - - result = POST( - '/filesystem/setperm/', { - 'path': f'/mnt/{MODE_DATASET}/canary', - 'mode': f'{new_mode:03o}', - 'gid': 0, - 'uid': 0 - } - ) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - if job_status['state'] != 'SUCCESS': - return - - file_mode_check_xor(mode_bit) - - -def test_21_delete_child_dataset(request): - depends(request, ["RECURSIVE_PREPARED"]) - result = DELETE( - f'/pool/dataset/id/{subdataset_url}/' - ) - assert result.status_code == 200, result.text - - -def test_22_delete_group(request): - depends(request, ["GROUP_CREATED"]) - results = DELETE(f"/group/id/{groupid}/", {"delete_users": True}) - assert results.status_code == 200, results.text - - -def test_23_delete_user(request): - depends(request, ["USER_CREATED"]) - results = DELETE(f"/user/id/{modeuser_id}/", {"delete_group": True}) - assert results.status_code == 200, results.text - - -def test_24_delete_dataset(request): - depends(request, ["DATASET_CREATED"]) - result = DELETE( - f'/pool/dataset/id/{dataset_url}/' - ) - assert result.status_code == 200, result.text diff --git a/tests/api2/test_348_posix_acl.py b/tests/api2/test_348_posix_acl.py deleted file mode 100644 index bb6825e522d8c..0000000000000 --- a/tests/api2/test_348_posix_acl.py +++ /dev/null @@ -1,535 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import sys -import os -import enum -import pytest -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST, wait_on_job -from auto_config import pool_name, user, password -from pytest_dependency import depends - - -ACLTEST_DATASET = f'{pool_name}/posixacltest' -DATASET_URL = ACLTEST_DATASET.replace('/', '%2F') - -ACLTEST_SUBDATASET = f'{pool_name}/posixacltest/sub1' -SUBDATASET_URL = ACLTEST_SUBDATASET.replace('/', '%2F') - -permset_empty = {"READ": False, "WRITE": False, "EXECUTE": False} -permset_full = {"READ": True, "WRITE": True, "EXECUTE": True} - -tags = { - "USER_OBJ": {"mask_required": False}, - "GROUP_OBJ": {"mask_required": False}, - "MASK": {"mask_required": False}, - "USER": {"mask_required": True}, - "GROUP": {"mask_required": True}, - "OTHER": {"mask_required": False}, -} - - -class ACLBrand(enum.Enum): - ACCESS = enum.auto() - DEFAULT = enum.auto() - - def getacl(self, perms=None): - """ - Default to 770 unless permissions explicitly specified. 
- """ - - out = [ - { - "tag": "USER_OBJ", - "id": -1, - "perms": perms if perms else permset_full.copy(), - "default": self.name == "DEFAULT", - }, - { - "tag": "GROUP_OBJ", - "id": -1, - "perms": perms if perms else permset_full.copy(), - "default": self.name == "DEFAULT", - }, - { - "tag": "OTHER", - "id": -1, - "perms": perms if perms else permset_empty.copy(), - "default": self.name == "DEFAULT", - } - ] - return out - - -default_acl = ACLBrand.ACCESS.getacl() - -JOB_ID = None - - -def test_01_check_dataset_endpoint(): - assert isinstance(GET('/pool/dataset/').json(), list) - - -@pytest.mark.dependency(name="DATASET_CREATED") -def test_02_create_dataset(request): - result = POST( - '/pool/dataset/', { - 'name': ACLTEST_DATASET, - 'acltype': 'POSIX', - 'aclmode': 'DISCARD', - } - ) - assert result.status_code == 200, result.text - - -@pytest.mark.dependency(name="HAS_POSIX_ACLS") -def test_03_get_acltype(request): - """ - This test verifies that our dataset was created - successfully and that the acltype is POSIX1E, - which should be default for a "generic" dataset. - """ - depends(request, ["DATASET_CREATED"]) - global results - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - } - result = POST('/filesystem/getacl/', payload) - assert result.status_code == 200, results.text - assert result.json()['acltype'] == 'POSIX1E', results.text - - -def test_04_basic_set_acl_for_dataset(request): - """ - This test verifies that we can set a trivial - POSIX1E ACL through the setacl endpoint. - """ - depends(request, ["HAS_POSIX_ACLS"]) - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': ACLBrand.ACCESS.getacl(), - 'gid': 65534, - 'uid': 65534, - 'acltype': 'POSIX1E' - } - - result = POST('/filesystem/setacl/', payload) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_05_get_filesystem_getacl(request): - depends(request, ["HAS_POSIX_ACLS"]) - global results - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - } - results = POST('/filesystem/getacl/', payload) - assert results.status_code == 200, results.text - - -@pytest.mark.parametrize('key', ['tag', 'perms']) -def test_06_verify_filesystem_getacl(request, key): - """ - This test verifies that our payload in above test was - correctly applied and that the resulting ACL is reported as trivial. - """ - depends(request, ["HAS_POSIX_ACLS"]) - assert results.json()['acl'][0][key] == default_acl[0][key], results.text - assert results.json()['acl'][1][key] == default_acl[1][key], results.text - assert results.json()['acl'][2][key] == default_acl[2][key], results.text - assert results.json()['trivial'], results.text - - -def test_07_verify_setacl_chown(request): - """ - This test verifies that the UID and GID from setacl - payload were applied correctly. When a dataset is created, - UID and GID will be 0. - """ - depends(request, ["HAS_POSIX_ACLS"]) - assert results.json()['uid'] == 65534, results.text - assert results.json()['gid'] == 65534, results.text - - -""" -At this point very basic functionality of API endpoint is verified. -Proceed to more rigorous testing of permissions. -""" - - -@pytest.mark.parametrize('perm', ["READ", "WRITE", "EXECUTE"]) -def test_08_set_perms(request, perm): - """ - Validation that READ, WRITE, EXECUTE are set correctly via endpoint. - OTHER entry is used for this purpose. 
- """ - depends(request, ["HAS_POSIX_ACLS"]) - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': ACLBrand.ACCESS.getacl(), - 'acltype': 'POSIX1E' - } - payload['dacl'][2]['perms'][perm] = True - result = POST('/filesystem/setacl/', payload) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_DATASET}'}) - assert results.status_code == 200, results.text - received_perms = results.json()['acl'][2]['perms'] - assert received_perms[perm], results.text - - -@pytest.mark.parametrize('tag', tags.keys()) -def test_09_set_tags(request, tag): - """ - Validation that entries for all tag types can be set correctly. - In case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry - is modified to match our test permset. USER and GROUP (named) - entries are set for id 1000 (user / group need not exist for - this to succeed). Named entries require an additional mask entry. - """ - depends(request, ["HAS_POSIX_ACLS"]) - test_permset = {"READ": True, "WRITE": False, "EXECUTE": True} - must_add = True - - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': ACLBrand.ACCESS.getacl(), - 'acltype': 'POSIX1E' - } - for entry in payload['dacl']: - if entry['tag'] == tag: - entry['perms'] = test_permset - must_add = False - break - - if must_add: - new_entry = { - 'tag': tag, - 'perms': test_permset, - 'id': 1000, - 'default': False, - } - if tag == 'MASK': - new_entry['id'] = -1 - # POSIX ACLs are quite particular about - # ACE ordering. We do this on backend. - # MASK comes before OTHER. - payload['dacl'].insert(2, new_entry) - elif tag == 'USER': - payload['dacl'].insert(1, new_entry) - elif tag == 'GROUP': - payload['dacl'].insert(2, new_entry) - - if tags[tag]['mask_required']: - new_entry = { - 'tag': "MASK", - 'perms': test_permset, - 'id': -1, - 'default': False, - } - payload['dacl'].insert(3, new_entry) - - results = POST('/filesystem/setacl/', payload) - assert results.status_code == 200, results.text - JOB_ID = results.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_DATASET}'}) - assert results.status_code == 200, results.text - new_acl = results.json()['acl'] - assert payload['dacl'] == new_acl, results.text - - -@pytest.mark.parametrize('tag', tags.keys()) -def test_10_set_tags_default(request, tag): - """ - Validation that entries for all tag types can be set correctly. - In case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry - is modified to match our test permset. USER and GROUP (named) - entries are set for id 1000 (user / group need not exist for - this to succeed). Named entries require an additional mask entry. - This particular test covers "default" entries in POSIX1E ACL. 
- """ - depends(request, ["HAS_POSIX_ACLS"]) - test_permset = {"READ": True, "WRITE": False, "EXECUTE": True} - must_add = True - - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': ACLBrand.ACCESS.getacl(), - 'acltype': 'POSIX1E', - } - default = ACLBrand.DEFAULT.getacl() - for entry in default: - if entry['tag'] == tag: - entry['perms'] = test_permset - must_add = False - - if must_add: - new_entry = { - 'tag': tag, - 'perms': test_permset, - 'id': 1000, - 'default': True, - } - if tag == 'MASK': - new_entry['id'] = -1 - # POSIX ACLs are quite particular about - # ACE ordering. We do this on backend. - # MASK comes before OTHER. - default.insert(2, new_entry) - elif tag == 'USER': - default.insert(1, new_entry) - elif tag == 'GROUP': - default.insert(2, new_entry) - - if tags[tag]['mask_required']: - new_entry = { - 'tag': "MASK", - 'perms': test_permset, - 'id': -1, - 'default': True, - } - default.insert(3, new_entry) - - payload['dacl'].extend(default) - results = POST('/filesystem/setacl/', payload) - assert results.status_code == 200, results.text - JOB_ID = results.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_DATASET}'}) - assert results.status_code == 200, results.text - new_acl = results.json() - assert payload['dacl'] == new_acl['acl'], results.text - assert new_acl['trivial'] is False, results.text - - -def test_11_non_recursive_acl_strip(request): - """ - Verify that non-recursive ACL strip works correctly. - We do this by checking result of subsequent getacl - request on the path (it should report that it is "trivial"). - """ - depends(request, ["HAS_POSIX_ACLS"]) - - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': [], - 'acltype': 'POSIX1E', - 'options': {'stripacl': True}, - } - result = POST('/filesystem/setacl/', payload) - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_DATASET}'}) - assert results.status_code == 200, results.text - new_acl = results.json() - assert new_acl['trivial'], results.text - - -""" -This next series of tests verifies that ACLs are being inherited correctly. -We first create a child dataset to verify that ACLs do not change unless -'traverse' is set. -""" - - -def test_12_prepare_recursive_tests(request): - depends(request, ["HAS_POSIX_ACLS"], scope="session") - result = POST( - '/pool/dataset/', { - 'name': ACLTEST_SUBDATASET, - 'acltype': 'POSIX', - 'aclmode': 'DISCARD', - } - ) - assert result.status_code == 200, result.text - - cmd = f'mkdir -p /mnt/{ACLTEST_DATASET}/dir1/dir2' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - cmd = f'touch /mnt/{ACLTEST_DATASET}/dir1/testfile' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - cmd = f'touch /mnt/{ACLTEST_DATASET}/dir1/dir2/testfile' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_13_recursive_no_traverse(request): - """ - Test that ACL is recursively applied correctly, but does - not affect mountpoint of child dataset. - - In this case, access ACL will have 750 for dataset mountpoint, - and default ACL will have 777. Recusively applying will grant - 777 for access and default. 
- """ - depends(request, ["HAS_POSIX_ACLS"]) - - payload = { - 'path': f'/mnt/{ACLTEST_DATASET}', - 'gid': 65534, - 'uid': 65534, - 'dacl': ACLBrand.ACCESS.getacl(), - 'acltype': 'POSIX1E', - 'options': {'recursive': True}, - } - new_perms = {"READ": True, "WRITE": True, "EXECUTE": True} - default = ACLBrand.DEFAULT.getacl(new_perms) - - payload['dacl'].extend(default) - result = POST('/filesystem/setacl/', payload) - - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - # Verify that it hasn't changed. Should still report as trivial. - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_SUBDATASET}'}) - assert results.status_code == 200, results.text - new_acl = results.json() - assert new_acl['trivial'], results.text - - results2 = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_DATASET}/dir1'}) - - assert results2.status_code == 200, results.text - # Verify that user was changed on subdirectory - assert results2.json()['uid'] == 65534, results.text - - assert results2.status_code == 200, results.text - theacl = results2.json() - assert theacl['trivial'] is False, results.text - for entry in theacl['acl']: - assert entry['perms'] == new_perms, results.text - - -def test_14_recursive_with_traverse(request): - """ - This test verifies that setting `traverse = True` - will allow setacl operation to cross mountpoints. - """ - depends(request, ["HAS_POSIX_ACLS"]) - - payload = { - 'gid': 65534, - 'uid': 65534, - 'path': f'/mnt/{ACLTEST_DATASET}', - 'dacl': ACLBrand.ACCESS.getacl(), - 'acltype': 'POSIX1E', - 'options': {'recursive': True, 'traverse': True}, - } - default = ACLBrand.DEFAULT.getacl({"READ": True, "WRITE": True, "EXECUTE": True}) - - payload['dacl'].extend(default) - result = POST('/filesystem/setacl/', payload) - - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - results = POST('/filesystem/getacl/', - {'path': f'/mnt/{ACLTEST_SUBDATASET}'}) - assert results.status_code == 200, results.text - - new_acl = results.json() - assert new_acl['trivial'] is False, results.text - - # Verify that user was changed - assert results.json()['uid'] == 65534, results.text - - -def test_15_strip_acl_from_dataset(request): - """ - Strip ACL via pool.dataset.permission endpoint. - This should work even for POSIX1E ACLs. - """ - depends(request, ["HAS_POSIX_ACLS"]) - result = POST( - f'/pool/dataset/id/{DATASET_URL}/permission/', { - 'acl': [], - 'mode': '777', - 'options': {'stripacl': True, 'recursive': True} - } - ) - - assert result.status_code == 200, result.text - JOB_ID = result.json() - job_status = wait_on_job(JOB_ID, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -""" -The next four tests check that we've remotved the ACL from the -mountpoint, a subdirectory, and a file. These are all potentially -different cases for where we can fail to strip an ACL. 
-""" - - -def test_16_filesystem_acl_is_not_removed_child_dataset(request): - depends(request, ["HAS_POSIX_ACLS"]) - results = POST('/filesystem/stat/', f'/mnt/{ACLTEST_SUBDATASET}') - assert results.status_code == 200, results.text - assert results.json()['acl'] is True, results.text - - -def test_17_filesystem_acl_is_removed_mountpoint(request): - depends(request, ["HAS_POSIX_ACLS"]) - results = POST('/filesystem/stat/', f'/mnt/{ACLTEST_DATASET}') - assert results.status_code == 200, results.text - assert results.json()['acl'] is False, results.text - assert oct(results.json()['mode']) == '0o40777', results.text - - -def test_18_filesystem_acl_is_removed_subdir(request): - depends(request, ["HAS_POSIX_ACLS"]) - results = POST('/filesystem/stat/', f'/mnt/{ACLTEST_DATASET}/dir1') - assert results.status_code == 200, results.text - assert results.json()['acl'] is False, results.text - assert oct(results.json()['mode']) == '0o40777', results.text - - -def test_19_filesystem_acl_is_removed_file(request): - depends(request, ["HAS_POSIX_ACLS"]) - results = POST('/filesystem/stat/', - f'/mnt/{ACLTEST_DATASET}/dir1/testfile') - assert results.status_code == 200, results.text - assert results.json()['acl'] is False, results.text - assert oct(results.json()['mode']) == '0o100777', results.text - - -def test_20_delete_child_dataset(request): - depends(request, ["HAS_POSIX_ACLS"]) - result = DELETE( - f'/pool/dataset/id/{SUBDATASET_URL}/' - ) - assert result.status_code == 200, result.text - - -def test_30_delete_dataset(request): - result = DELETE( - f'/pool/dataset/id/{DATASET_URL}/' - ) - assert result.status_code == 200, result.text diff --git a/tests/api2/test_350_pool_dataset_quota_alert.py b/tests/api2/test_350_pool_dataset_quota_alert.py deleted file mode 100644 index 2d5bbc89a83a6..0000000000000 --- a/tests/api2/test_350_pool_dataset_quota_alert.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import sys -import os -import re -import pytest -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST -from auto_config import pool_name, user, password - -G = 1024 * 1024 * 1024 - - -@pytest.mark.parametrize("datasets,expected_alerts", [ - ( - { - "": { - "used": 900, - "quota": 1 * G, - } - }, - [ - {"formatted": r"Quota exceeded on dataset tank/quota_test. Used 8|9[0-9.]+% \(8|9[0-9.]+ MiB of 1 GiB\)."}, - ] - ), - ( - { - "": { - "used": 118, - "quota": 10 * G, - "refquota": 1 * G, - } - }, - [ - # There was a false positive: - # {"formatted": r"Quota exceeded on dataset tank/quota_test. 
Used 91.[0-9]+% \(9.[0-9]+ GiB of 10 GiB\)."}, - ] - ), - ( - { - "": { - "used": 100, - "quota": 1000000000 * G, - } - }, - [ - # There should be no quota alerts if quota is set to a larger value than dataset size - ] - ), -]) -def test_dataset_quota_alert(request, datasets, expected_alerts): - assert "" in datasets - - try: - for dataset, params in datasets.items(): - used = params.pop("used", None) - - result = POST( - "/pool/dataset/", { - "name": f"{pool_name}/quota_test/{dataset}".rstrip("/"), - **params, - } - ) - assert result.status_code == 200, result.text - - if used is not None: - results = SSH_TEST(f'dd if=/dev/urandom of=/mnt/{pool_name}/quota_test/{dataset}/blob ' - f'bs=1M count={used}', user, password) - assert results['result'] is True, results - - results = SSH_TEST("midclt call alert.initialize", user, password) - assert results['result'] is True, results - - results = SSH_TEST("midclt call -job core.bulk alert.process_alerts '[[]]'", user, password) - assert results['result'] is True, results - - result = GET("/alert/list/") - assert result.status_code == 200, result.text - - alerts = [alert for alert in result.json() if alert["source"] == "Quota"] - - assert len(alerts) == len(expected_alerts), alerts - - for alert, expected_alert in zip(alerts, expected_alerts): - for k, v in expected_alert.items(): - if k == "formatted": - assert re.match(v, alert[k]), (alert, expected_alert, k) - else: - assert alert[k] == v, (alert, expected_alert, k) - finally: - result = DELETE(f"/pool/dataset/id/{pool_name}%2Fquota_test/", { - "recursive": True, - }) - assert result.status_code == 200, result.text diff --git a/tests/api2/test_360_pool_scrub.py b/tests/api2/test_360_pool_scrub.py deleted file mode 100644 index 5127536062374..0000000000000 --- a/tests/api2/test_360_pool_scrub.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, PUT, POST, DELETE -from auto_config import pool_name - - -def test_01_create_scrub_for_same_pool(request): - global pool_id - pool_id = GET(f"/pool/?name={pool_name}").json()[0]["id"] - result = POST("/pool/scrub/", { - "pool": pool_id, - "threshold": 1, - "description": "", - "schedule": { - "minute": "00", - "hour": "00", - "dom": "1", - "month": "1", - "dow": "1", - }, - "enabled": True, - }) - assert result.status_code == 422, result.text - text = "A scrub with this pool already exists" - assert result.json()["pool_scrub_create.pool"][0]["message"] == text, result.text - - -def test_02_get_pool_name_scrub_id(request): - global scrub_id - result = GET(f"/pool/scrub/?pool_name={pool_name}") - assert result.status_code == 200, result.text - scrub_id = result.json()[0]['id'] - - -def test_03_update_scrub(request): - result = PUT(f"/pool/scrub/id/{scrub_id}/", { - "pool": pool_id, - "threshold": 2, - "description": "", - "schedule": { - "minute": "00", - "hour": "00", - "dom": "1", - "month": "1", - "dow": "1", - }, - "enabled": True, - }) - assert result.status_code == 200, result.text - - -def test_04_delete_scrub(request): - result = DELETE(f"/pool/scrub/id/{scrub_id}/") - assert result.status_code == 200, result.text - - -def test_05_create_scrub(request): - result = POST("/pool/scrub/", { - "pool": pool_id, - "threshold": 1, - "description": "", - "schedule": { - "minute": "00", - "hour": "00", - "dom": "1", - "month": "1", - "dow": "1", - }, - "enabled": True, - }) - assert 
result.status_code == 200, result.text diff --git a/tests/api2/test_410_smart.py b/tests/api2/test_410_smart.py deleted file mode 100644 index 34b7604dbdf7a..0000000000000 --- a/tests/api2/test_410_smart.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - -import os -import pytest -import sys -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, POST, PUT, GET -from auto_config import interface - -Reason = "VM detected no real ATA disk" - -not_real = ( - interface == "vtnet0" - or interface == "em0" - or 'enp0s' in interface -) - - -def test_05_enable_smartd_service_at_boot(): - results = GET('/service/?service=smartd') - smartid = results.json()[0]['id'] - - results = PUT(f'/service/id/{smartid}/', {"enable": True}) - assert results.status_code == 200, results.text - - -def test_06_look_smartd_service_at_boot(): - results = GET('/service/?service=smartd') - assert results.status_code == 200, results.text - assert results.json()[0]["enable"] is True, results.text - - -# Read test below only on real hardware -if not_real is False: - def test_07_starting_smartd_service(): - payload = {"service": "smartd"} - results = POST("/service/start/", payload) - assert results.status_code == 200, results.text - sleep(1) - - def test_08_checking_to_see_if_smartd_service_is_running(): - results = GET('/service/?service=smartd') - assert results.json()[0]["state"] == "RUNNING", results.text diff --git a/tests/api2/test_420_smb.py b/tests/api2/test_420_smb.py deleted file mode 100644 index 6bf86c15f2cd9..0000000000000 --- a/tests/api2/test_420_smb.py +++ /dev/null @@ -1,449 +0,0 @@ -import pytest -import sys -import os -import secrets -import string -import uuid -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from protocols import smb_connection -from utils import create_dataset -from auto_config import pool_name -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.assets.pool import dataset as make_dataset -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.system import reset_systemd_svcs - - -AUDIT_WAIT = 10 -SMB_NAME = "TestCifsSMB" -SHAREUSER = 'smbuser420' -PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - -@pytest.fixture(scope='module') -def smb_info(): - with make_dataset('smb-cifs', data={'share_type': 'SMB'}) as ds: - with user({ - 'username': SHAREUSER, - 'full_name': SHAREUSER, - 'group_create': True, - 'password': PASSWD - }, get_instance=False): - with smb_share(os.path.join('/mnt', ds), SMB_NAME, { - 'purpose': 'NO_PRESET', - }) as s: - try: - call('smb.update', { - 'guest': SHAREUSER - }) - call('service.update', 'cifs', {'enable': True}) - call('service.start', 'cifs') - yield {'dataset': ds, 'share': s} - finally: - call('smb.update', { - 'guest': 'nobody' - }) - call('service.stop', 'cifs') - call('service.update', 'cifs', {'enable': False}) - - -@pytest.fixture(scope='function') -def enable_guest(smb_info): - smb_id = smb_info['share']['id'] - call('sharing.smb.update', smb_id, {'guestok': True}) - try: - yield - finally: - call('sharing.smb.update', smb_id, {'guestok': False}) - - -@pytest.fixture(scope='function') -def enable_aapl(): - reset_systemd_svcs('smbd') - call('smb.update', {'aapl_extensions': True}) - - try: - yield - finally: - call('smb.update', {'aapl_extensions': 
False}) - - -@pytest.fixture(scope='function') -def enable_smb1(): - reset_systemd_svcs('smbd') - call('smb.update', {'enable_smb1': True}) - - try: - yield - finally: - call('smb.update', {'enable_smb1': False}) - - -@pytest.fixture(scope='function') -def enable_recycle_bin(smb_info): - smb_id = smb_info['share']['id'] - call('sharing.smb.update', smb_id, {'recyclebin': True}) - - try: - yield - finally: - call('sharing.smb.update', smb_id, {'recyclebin': False}) - - -@pytest.mark.parametrize('proto,runas', [ - ('SMB1', 'GUEST'), - ('SMB2', 'GUEST'), - ('SMB1', SHAREUSER), - ('SMB2', SHAREUSER) -]) -def test__basic_smb_ops(enable_smb1, enable_guest, proto, runas): - with smb_connection( - share=SMB_NAME, - username=runas, - password=PASSWD, - smb1=(proto == 'SMB1') - ) as c: - filename1 = f'testfile1_{proto.lower()}_{runas}.txt' - filename2 = f'testfile2_{proto.lower()}_{runas}.txt' - dirname = f'testdir_{proto.lower()}_{runas}.txt' - - fd = c.create_file(filename1, 'w') - c.write(fd, b'foo') - val = c.read(fd, 0, 3) - c.close(fd, True) - assert val == b'foo' - - c.mkdir(dirname) - fd = c.create_file(f'{dirname}/{filename2}', 'w') - c.write(fd, b'foo2') - val = c.read(fd, 0, 4) - c.close(fd, True) - assert val == b'foo2' - - c.rmdir(dirname) - - # DELETE_ON_CLOSE flag was set prior to closing files - # and so root directory should be empty - assert c.ls('/') == [] - - -def test__change_sharing_smd_home_to_true(smb_info): - reset_systemd_svcs('smbd') - smb_id = smb_info['share']['id'] - share = call('sharing.smb.update', smb_id, {'home': True}) - try: - share_path = call('smb.getparm', 'path', 'homes') - assert share_path == f'{share["path_local"]}/%U' - finally: - new_info = call('sharing.smb.update', smb_id, {'home': False}) - - share_path = call('smb.getparm', 'path', new_info['name']) - assert share_path == share['path_local'] - obey_pam_restrictions = call('smb.getparm', 'obey pam restrictions', 'GLOBAL') - assert obey_pam_restrictions is False - - -def test__change_timemachine_to_true(enable_aapl, smb_info): - smb_id = smb_info['share']['id'] - call('sharing.smb.update', smb_id, {'timemachine': True}) - try: - share_info = call('sharing.smb.query', [['id', '=', smb_id]], {'get': True}) - assert share_info['timemachine'] is True - - enabled = call('smb.getparm', 'fruit:time machine', share_info['name']) - assert enabled == 'True' - - vfs_obj = call('smb.getparm', 'vfs objects', share_info['name']) - assert 'fruit' in vfs_obj - finally: - call('sharing.smb.update', smb_id, {'timemachine': False}) - - -def do_recycle_ops(c, has_subds=False): - # Our recycle repository should be auto-created on connect. - fd = c.create_file('testfile.txt', 'w') - c.write(fd, b'foo') - c.close(fd, True) - - # Above close op also deleted the file and so - # we expect file to now exist in the user's .recycle directory - fd = c.create_file(f'.recycle/{SHAREUSER}/testfile.txt', 'r') - val = c.read(fd, 0, 3) - c.close(fd) - assert val == b'foo' - - # re-open so that we can set DELETE_ON_CLOSE - # this verifies that SMB client can purge file from recycle bin - c.close(c.create_file(f'.recycle/{SHAREUSER}/testfile.txt', 'w'), True) - assert c.ls(f'.recycle/{SHAREUSER}/') == [] - - if not has_subds: - return - - # nested datasets get their own recycle bin to preserve atomicity of - # rename op. 
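# (A rename cannot atomically cross a ZFS dataset boundary, so each
# dataset keeps its own repository; the recycled path is built the same
# way as at the top level, matching the f'subds/.recycle/{SHAREUSER}/...'
# paths used below.)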
- fd = c.create_file('subds/testfile2.txt', 'w') - c.write(fd, b'boo') - c.close(fd, True) - - fd = c.create_file(f'subds/.recycle/{SHAREUSER}/testfile2.txt', 'r') - val = c.read(fd, 0, 3) - c.close(fd) - assert val == b'boo' - - c.close(c.create_file(f'subds/.recycle/{SHAREUSER}/testfile2.txt', 'w'), True) - assert c.ls(f'subds/.recycle/{SHAREUSER}/') == [] - - -def test__recyclebin_functional_test(enable_recycle_bin, smb_info): - with create_dataset(f'{smb_info["dataset"]}/subds', {'share_type': 'SMB'}): - with smb_connection( - share=SMB_NAME, - username=SHAREUSER, - password=PASSWD, - ) as c: - do_recycle_ops(c, True) - - -@pytest.mark.parametrize('smb_config', [ - {'global': {'aapl_extensions': True}, 'share': {'aapl_name_mangling': True}}, - {'global': {'aapl_extensions': True}, 'share': {'aapl_name_mangling': False}}, - {'global': {'aapl_extensions': False}, 'share': {}}, -]) -def test__recyclebin_functional_test_subdir(smb_info, smb_config): - tmp_ds = f"{pool_name}/recycle_test" - tmp_ds_path = f'/mnt/{tmp_ds}/subdir' - tmp_share_name = 'recycle_test' - - reset_systemd_svcs('smbd') - call('smb.update', smb_config['global']) - # basic tests of recyclebin operations - with create_dataset(tmp_ds, {'share_type': 'SMB'}): - ssh(f'mkdir {tmp_ds_path}') - with smb_share(tmp_ds_path, tmp_share_name, { - 'purpose': 'NO_PRESET', - 'recyclebin': True - } | smb_config['share']): - with smb_connection( - share=tmp_share_name, - username=SHAREUSER, - password=PASSWD, - ) as c: - do_recycle_ops(c) - - # more abusive test where first TCON op is opening file in subdir to delete - with create_dataset(tmp_ds, {'share_type': 'SMB'}): - ops = [ - f'mkdir {tmp_ds_path}', - f'mkdir {tmp_ds_path}/subdir', - f'touch {tmp_ds_path}/subdir/testfile', - f'chown {SHAREUSER} {tmp_ds_path}/subdir/testfile', - ] - ssh(';'.join(ops)) - with smb_share(tmp_ds_path, tmp_share_name, { - 'purpose': 'NO_PRESET', - 'recyclebin': True - } | smb_config['share']): - with smb_connection( - share=tmp_share_name, - username=SHAREUSER, - password=PASSWD, - ) as c: - fd = c.create_file('subdir/testfile', 'w') - c.write(fd, b'boo') - c.close(fd, True) - - fd = c.create_file(f'.recycle/{SHAREUSER}/subdir/testfile', 'r') - val = c.read(fd, 0, 3) - c.close(fd) - assert val == b'boo' - - -def test__netbios_name_change_check_sid(): - """ changing netbiosname should not alter our local sid value """ - orig = call('smb.config') - new_sid = call('smb.update', {'netbiosname': 'nb_new'})['cifs_SID'] - - try: - assert new_sid == orig['cifs_SID'] - localsid = call('smb.groupmap_list')['localsid'] - assert new_sid == localsid - finally: - call('smb.update', {'netbiosname': orig['netbiosname']}) - - -AUDIT_FIELDS = [ - 'audit_id', 'timestamp', 'address', 'username', 'session', 'service', - 'service_data', 'event', 'event_data', 'success' -] - - -def validate_vers(vers, expected_major, expected_minor): - assert 'major' in vers, str(vers) - assert 'minor' in vers, str(vers) - assert vers['major'] == expected_major - assert vers['minor'] == expected_minor - - -def validate_svc_data(msg, svc): - assert 'service_data' in msg, str(msg) - svc_data = msg['service_data'] - for key in ['vers', 'service', 'session_id', 'tcon_id']: - assert key in svc_data, str(svc_data) - - assert svc_data['service'] == svc - - assert isinstance(svc_data['session_id'], str) - assert svc_data['session_id'].isdigit() - - assert isinstance(svc_data['tcon_id'], str) - assert svc_data['tcon_id'].isdigit() - - -def validate_event_data(event_data, schema): - event_data_keys = 
set(event_data.keys())
-    schema_keys = set(schema['_attrs_order_'])
-    assert event_data_keys == schema_keys
-
-
-def validate_audit_op(msg, svc):
-    schema = call(
-        'audit.json_schemas',
-        [['_name_', '=', f'audit_entry_smb_{msg["event"].lower()}']],
-        {
-            'select': [
-                ['_attrs_order_', 'attrs'],
-                ['properties.event_data', 'event_data']
-            ],
-        }
-    )
-
-    assert schema != [], str(msg)
-    schema = schema[0]
-
-    for key in schema['attrs']:
-        assert key in msg, str(msg)
-
-    validate_svc_data(msg, svc)
-    try:
-        aid_guid = uuid.UUID(msg['audit_id'])
-    except ValueError:
-        raise AssertionError(f'{msg["audit_id"]}: malformed UUID')
-
-    assert str(aid_guid) == msg['audit_id']
-
-    try:
-        sess_guid = uuid.UUID(msg['session'])
-    except ValueError:
-        raise AssertionError(f'{msg["session"]}: malformed UUID')
-
-    assert str(sess_guid) == msg['session']
-
-    validate_event_data(msg['event_data'], schema['event_data'])
-
-
-def do_audit_ops(svc):
-    with smb_connection(
-        share=svc,
-        username=SHAREUSER,
-        password=PASSWD,
-    ) as c:
-        fd = c.create_file('testfile.txt', 'w')
-        for i in range(0, 3):
-            c.write(fd, b'foo')
-            c.read(fd, 0, 3)
-        c.close(fd, True)
-
-    sleep(AUDIT_WAIT)
-    return call('auditbackend.query', 'SMB', [['event', '!=', 'AUTHENTICATION']])
-
-
-def test__audit_log(request):
-    def get_event(event_list, ev_type):
-        for e in event_list:
-            if e['event'] == ev_type:
-                return e
-
-        return None
-
-    with make_dataset('smb-audit', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), 'SMB_AUDIT', {
-            'purpose': 'NO_PRESET',
-            'guestok': True,
-            'audit': {'enable': True}
-        }) as s:
-            events = do_audit_ops(s['name'])
-            assert len(events) > 0
-
-            for ev_type in ['CONNECT', 'DISCONNECT', 'CREATE', 'CLOSE', 'READ', 'WRITE']:
-                assert get_event(events, ev_type) is not None, str(events)
-
-            for event in events:
-                validate_audit_op(event, s['name'])
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'ignore_list': ['builtin_users']}})
-            assert new_data['audit']['enable'], str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that being a member of a group in the ignore list is sufficient to avoid new messages.
-            # By default authentication attempts are always logged.
-            assert do_audit_ops(s['name']) == events
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'watch_list': ['builtin_users']}})
-            assert new_data['audit']['enable'], str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-            assert new_data['audit']['watch_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that watch_list takes precedence.
-            # By default authentication attempts are always logged.
-            new_events = do_audit_ops(s['name'])
-            assert len(new_events) > len(events)
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'enable': False}})
-            assert new_data['audit']['enable'] is False, str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-            assert new_data['audit']['watch_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that disabling audit prevents new messages from being written.
-            assert do_audit_ops(s['name']) == new_events
-
-
-@pytest.mark.parametrize('torture_test', [
-    'local.binding',
-    'local.ntlmssp',
-    'local.smbencrypt',
-    'local.messaging',
-    'local.irpc',
-    'local.strlist',
-    'local.file',
-    'local.str',
-    'local.time',
-    'local.datablob',
-    'local.binsearch',
-    'local.asn1',
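# (Aside on validate_audit_op above: uuid.UUID() both parses and
# canonicalizes its input, so the str() round-trip equality only holds
# for the lowercase hyphenated form, e.g.
#     str(uuid.UUID('00000000-0000-0000-0000-000000000000'))
# an uppercase or brace-wrapped audit_id would parse yet fail the check.)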
- 'local.anonymous_shared', - 'local.strv', - 'local.strv_util', - 'local.util', - 'local.idtree', - 'local.dlinklist', - 'local.genrand', - 'local.iconv', - 'local.socket', - 'local.pac', - 'local.share', - 'local.loadparm', - 'local.charset', - 'local.convert_string', - 'local.string_case_handle', - 'local.tevent_req', - 'local.util_str_escape', - 'local.talloc', - 'local.replace', - 'local.crypto.md4' -]) -def test__local_torture(request, torture_test): - ssh(f'smbtorture //127.0.0.1 {torture_test}') diff --git a/tests/api2/test_425_smb_protocol.py b/tests/api2/test_425_smb_protocol.py deleted file mode 100644 index 67e1021ce03e5..0000000000000 --- a/tests/api2/test_425_smb_protocol.py +++ /dev/null @@ -1,807 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -import enum -import secrets -import string -from base64 import b64decode, b64encode -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import PUT, GET, SSH_TEST -from auto_config import ( - user, - password, -) -from middlewared.test.integration.assets.account import user as create_user -from middlewared.test.integration.assets.smb import copy_stream, get_stream, smb_share, smb_mount -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server -from pytest_dependency import depends -from protocols import SMB, smb_connection -from samba import ntstatus -from samba import NTSTATUSError - - -SMB_NAME = "SMBPROTO" -SMB_USER = "smbuser" -SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -TEST_DATA = {} - - -class DOSmode(enum.Enum): - READONLY = 1 - HIDDEN = 2 - SYSTEM = 4 - ARCHIVE = 32 - - -netatalk_metadata = """ -AAUWBwACAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAEAAAAmgAAAAAAAAAIAAABYgAAABAAAAAJAAAA -egAAACAAAAAOAAABcgAAAASAREVWAAABdgAAAAiASU5PAAABfgAAAAiAU1lOAAABhgAAAAiAU1Z+ -AAABjgAAAARQTEFQbHRhcAQQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAIbmGsyG5hrOAAAAAKEvSOAAAAAAAAAAAAAAAAAcBAAAAAAAA9xS5YAAAAAAZ -AAAA -""" - -parsed_meta = """ -QUZQAAAAAQAAAAAAgAAAAFBMQVBsdGFwBBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAA -""" - -apple_kmdlabel = """ -8oBNzAaTG04NeBVAT078KCEjrzPrwPTUuZ4MXK1qVRDlBqLATmFSDFO2hXrS5VWsrg1DoZqeX6kF -zDEInIzw2XrZkI9lY3jvMAGXu76QvwrpRGv1G3Ehj+0= -""" - -apple_kmditemusertags = """ -YnBsaXN0MDCgCAAAAAAAAAEBAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAJ -""" - -AFPXattr = { - "org.netatalk.Metadata": { - "smbname": "AFP_AfpInfo", - "text": netatalk_metadata, - "bytes": b64decode(netatalk_metadata), - "smb_text": parsed_meta, - "smb_bytes": b64decode(parsed_meta) - }, - "com.apple.metadata:_kMDItemUserTags": { - "smbname": "com.apple.metadata_kMDItemUserTags", - "text": apple_kmditemusertags, - "bytes": b64decode(apple_kmditemusertags) - }, - "com.apple.metadata:kMDLabel_anemgxoe73iplck2hfuumqxdbu": { - "smbname": "com.apple.metadatakMDLabel_anemgxoe73iplck2hfuumqxdbu", - "text": apple_kmdlabel, - "bytes": b64decode(apple_kmdlabel) - }, -} - - -@pytest.fixture(scope='module') -def initialize_for_smb_tests(request): - with dataset('smb-proto', data={'share_type': 'SMB'}) as ds: - with create_user({ - 'username': SMB_USER, - 'full_name': SMB_USER, - 'group_create': True, - 
'password': SMB_PWD
-    }) as u:
-        try:
-            with smb_share(os.path.join('/mnt', ds), SMB_NAME, {
-                'auxsmbconf': 'zfs_core:base_user_quota = 1G'
-            }) as s:
-                try:
-                    call('service.start', 'cifs')
-                    yield {'dataset': ds, 'share': s, 'user': u}
-                finally:
-                    call('service.stop', 'cifs')
-        finally:
-            # In test_140_enable_aapl we turned afp on for the share, so wait until
-            # it has been destroyed before turning off aapl_extensions.
-            call('smb.update', {
-                'enable_smb1': False,
-                'aapl_extensions': False
-            })
-
-
-@pytest.fixture(scope='module')
-def mount_share():
-    with smb_mount(TEST_DATA['share']['name'], SMB_USER, SMB_PWD) as mp:
-        yield {'mountpoint': mp}
-
-
-@pytest.mark.dependency(name="SMB_SHARE_CREATED")
-def test_001_initialize_smb_service(initialize_for_smb_tests):
-    TEST_DATA.update(initialize_for_smb_tests)
-
-
-def test_002_check_client_count(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    with smb_connection(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=False
-    ) as c:
-        assert call("smb.client_count") == 1
-
-
-@pytest.mark.dependency(name="SHARE_IS_WRITABLE")
-def test_009_share_is_writable(request):
-    """
-    This test creates an empty file, sets the "delete on close" flag, then
-    closes it. NTSTATUSError should be raised containing failure details
-    if we are for some reason unable to access the share.
-
-    This test will fail if smb.conf / smb4.conf does not exist on the client / server running the test.
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("testfile", "w")
-    c.close(fd, True)
-    c.disconnect()
-
-
-@pytest.mark.parametrize('dm', DOSmode)
-def test_010_check_dosmode_create(request, dm):
-    """
-    This tests the setting of different DOS attributes through SMB2 Create,
-    verifying each through a directory listing after setting.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    if dm.value > DOSmode.SYSTEM.value:
-        return
-
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    if dm == DOSmode.READONLY:
-        c.create_file(dm.name, "w", "r")
-    elif dm == DOSmode.HIDDEN:
-        c.create_file(dm.name, "w", "h")
-    elif dm == DOSmode.SYSTEM:
-        c.create_file(dm.name, "w", "s")
-    dir_listing = c.ls("/")
-    for f in dir_listing:
-        if f['name'] != dm.name:
-            continue
-        # Archive is automatically set by kernel
-        to_check = f['attrib'] & ~DOSmode.ARCHIVE.value
-        c.disconnect()
-        assert (to_check & dm.value) != 0, f
-
-
-def test_011_check_dos_ro_cred_handling(request):
-    """
-    This test creates a file with the readonly attribute set, then
-    uses the open fd to write data to the file.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("RO_TEST", "w", "r")
-    c.write(fd, b"TESTING123\n")
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="SMB1_ENABLED")
-def test_050_enable_smb1(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    payload = {
-        "enable_smb1": True,
-    }
-    results = PUT("/smb/", payload)
-    assert results.status_code == 200, results.text
-
-
-@pytest.mark.dependency(name="SHARE_IS_WRITABLE_SMB1")
-def test_051_share_is_writable_smb1(request):
-    """
-    This test creates an empty file, sets the "delete on close" flag, then
-    closes it. NTSTATUSError should be raised containing failure details
-    if we are for some reason unable to access the share.
-
-    This test will fail if client min protocol != NT1 in smb.conf of the SMB client.
-    Sample smb.conf entry:
-
-    [global]
-    client min protocol = nt1
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("testfile", "w")
-    c.close(fd, True)
-    c.disconnect()
-
-
-@pytest.mark.parametrize('dm', DOSmode)
-def test_052_check_dosmode_create_smb1(request, dm):
-    """
-    This tests the setting of different DOS attributes through SMB1 create,
-    verifying each through a directory listing after setting.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    if dm.value > DOSmode.SYSTEM.value:
-        return
-
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    if dm == DOSmode.READONLY:
-        c.create_file(f'{dm.name}_smb1', "w", "r")
-    elif dm == DOSmode.HIDDEN:
-        c.create_file(f'{dm.name}_smb1', "w", "h")
-    elif dm == DOSmode.SYSTEM:
-        c.create_file(f'{dm.name}_smb1', "w", "s")
-    dir_listing = c.ls("/")
-    for f in dir_listing:
-        if f['name'] != f'{dm.name}_smb1':
-            continue
-        # Archive is automatically set by kernel
-        to_check = f['attrib'] & ~DOSmode.ARCHIVE.value
-        c.disconnect()
-        assert (to_check & dm.value) != 0, f
-
-
-@pytest.mark.dependency(name="STREAM_TESTFILE_CREATED")
-def test_060_create_base_file_for_streams_tests(request):
-    """
-    Create the base file that we will use for further stream tests.
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("streamstestfile", "w")
-    c.close(fd)
-    c.mkdir("streamstestdir")
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="STREAM_WRITTEN_SMB2")
-def test_061_create_and_write_stream_smb2(request, mount_share):
-    """
-    Create our initial stream and write to it over the SMB2/3 protocol.
-    Start with offset 0.
-    """
-    depends(request, ["STREAM_TESTFILE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("streamstestfile:smb2_stream", "w")
-    c.write(fd, b'test1', 0)
-    c.close(fd)
-
-    fd2 = c.create_file("streamstestdir:smb2_stream", "w")
-    c.write(fd2, b'test2', 0)
-    c.close(fd2)
-
-    fd3 = c.create_file("streamstestfile:smb2_stream", "w")
-    contents = c.read(fd3, 0, 5)
-    c.close(fd3)
-    kcontent1 = get_stream('streamstestfile', 'smb2_stream')
-
-    fd4 = c.create_file("streamstestdir:smb2_stream", "w")
-    contents2 = c.read(fd4, 0, 5)
-    c.close(fd4)
-    kcontent2 = get_stream('streamstestdir', 'smb2_stream')
-
-    c.rmdir("streamstestdir")
-    c.disconnect()
-    assert (contents.decode() == "test1")
-    assert (contents2.decode() == "test2")
-
-    # Remove samba compatibility NULL byte
-    assert kcontent1[:-1].decode() == 'test1'
-    assert kcontent2[:-1].decode() == 'test2'
-
-
-@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB2")
-def test_062_write_stream_large_offset_smb2(request, mount_share):
-    """
-    Append to our existing stream over the SMB2/3 protocol. Specify an offset that will
-    cause the resulting xattr to exceed the 64KiB default xattr size limit in Linux.
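-
-    For reference (illustrative, not part of the test logic): Samba's streams_xattr
-    behavior stores each alternate data stream as an extended attribute with a single
-    NUL terminator appended, so writing b'test2' at offset 131072 yields an xattr of
-
-        131072 + len(b'test2') + 1 == 131078 bytes
-
-    which is comfortably past the ordinary 65536-byte (64KiB) xattr ceiling.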
- """ - depends(request, ["STREAM_TESTFILE_CREATED"]) - with smb_connection( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=False - ) as c: - fd = c.create_file("streamstestfile:smb2_stream", "w") - try: - c.write(fd, b'test2', 131072) - finally: - c.close(fd) - - fd2 = c.create_file("streamstestfile:smb2_stream", "w") - try: - contents = c.read(fd2, 131072, 5) - finally: - c.close(fd2) - - kcontent = get_stream('streamstestfile', 'smb2_stream') - - assert (contents.decode() == "test2") - - # Verify that reading a large stream functions correctly - assert len(kcontent) == 131072 + 5 + 1 - - # Remove samba compatibility NULL byte - assert kcontent[131072:-1].decode() == 'test2' - - # Verify that SMB server rejects too-large stream write - fd = c.create_file("streamstestfile:smb2_stream", "w") - try: - with pytest.raises(NTSTATUSError) as e: - c.write(fd, b'test2', 2097152) - - assert e.value.args[0] == ntstatus.NT_STATUS_FILE_SYSTEM_LIMITATION - finally: - c.close(fd) - - # Verify that SMB server allows _very_ large write - fd = c.create_file("streamstestfile:smb2_stream", "w") - try: - # We have to an extra byte for that nul at end of xattr - offset = 2097152 - (len(b"test2") + 1) - c.write(fd, b"test2", offset) - contents = c.read(fd, offset, 5) - assert contents.decode() == "test2" - finally: - c.close(fd) - - copy_stream('streamstestfile', 'smb2_stream', 'smb2_stream2') - - fd = c.create_file("streamstestfile:smb2_stream", "r") - try: - contents_stream1 = c.read(fd, 0, 2097152) - finally: - c.close(fd) - - fd = c.create_file("streamstestfile:smb2_stream2", "r") - try: - contents_stream2 = c.read(fd, 0, 2097152) - finally: - c.close(fd) - - assert contents_stream1 == contents_stream2 - - -def test_063_stream_delete_on_close_smb2(request): - """ - Set delete_on_close on alternate datastream over SMB2/3 protocol, close, then verify - stream was deleted. - - TODO: I have open MR to expand samba python bindings to support stream enumeration. - Verifcation of stream deletion will have to be added once this is merged. - """ - depends(request, ["STREAM_WRITTEN_SMB2", "LARGE_STREAM_WRITTEN_SMB2"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False) - fd = c.create_file("streamstestfile:smb2_stream", "w") - c.close(fd, True) - - c.disconnect() - - -@pytest.mark.dependency(name="STREAM_WRITTEN_SMB1") -def test_065_create_and_write_stream_smb1(request): - """ - Create our initial stream and write to it over SMB1 protocol. - Start with offset 0. - """ - depends(request, ["STREAM_TESTFILE_CREATED"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True) - fd = c.create_file("streamstestfile:smb1_stream", "w") - c.write(fd, b'test1', 0) - c.close(fd) - - fd2 = c.create_file("streamstestfile:smb1_stream", "w") - contents = c.read(fd2, 0, 5) - c.close(fd2) - c.disconnect() - assert (contents.decode() == "test1") - - -@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB1") -def test_066_write_stream_large_offset_smb1(request): - """ - Append to our existing stream over SMB1 protocol. Specify an offset that will - cause resuling xattr to exceed 64KiB default xattr size limit in Linux. 
- """ - depends(request, ["STREAM_WRITTEN_SMB1"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True) - fd = c.create_file("streamstestfile:smb1_stream", "w") - c.write(fd, b'test2', 131072) - c.close(fd) - - fd2 = c.create_file("streamstestfile:smb1_stream", "w") - contents = c.read(fd2, 131072, 5) - c.close(fd2) - c.disconnect() - assert (contents.decode() == "test2") - - -def test_067_stream_delete_on_close_smb1(request): - """ - Set delete_on_close on alternate datastream over SMB1 protocol, close, then verify - stream was deleted. - - TODO: I have open MR to expand samba python bindings to support stream enumeration. - Verifcation of stream deletion will have to be added once this is merged. - """ - depends(request, ["STREAM_WRITTEN_SMB1", "LARGE_STREAM_WRITTEN_SMB1"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True) - fd = c.create_file("streamstestfile:smb1_stream", "w") - c.close(fd, True) - - c.disconnect() - - -def test_068_case_insensitive_rename(request): - """ - ZFS is case sensitive, but case preserving when casesensitivity == insensitive - - rename of to_rename -> To_rename should succeed and new file appear - correctly in directory listing. - - Will fail with NT_STATUS_OBJECT_NAME_COLLISION if we have regression and - samba identifies files as same. - """ - depends(request, ["SHARE_IS_WRITABLE"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True) - fd = c.create_file("to_rename", "w") - c.close(fd) - c.rename("to_rename", "To_rename") - files = [x['name'] for x in c.ls('\\')] - c.disconnect() - assert ("To_rename" in files) - - -def test_069_normal_rename(request): - """ - This verifies that renames are successfully completed - """ - depends(request, ["SHARE_IS_WRITABLE"]) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True) - fd = c.create_file("old_file_to_rename", "w") - c.close(fd) - c.rename("old_file_to_rename", "renamed_new_file") - files = [x['name'] for x in c.ls('\\')] - c.disconnect() - assert ("renamed_new_file" in files) - - -""" -At this point we grant SMB_USER SeDiskOperatorPrivilege by making it a member -of the local group builtin_administrators. This privilege is required to manipulate -SMB quotas. -""" - - -@pytest.mark.dependency(name="BA_ADDED_TO_USER") -def test_089_add_to_builtin_admins(request): - depends(request, ["SHARE_IS_WRITABLE"]) - smbuser_id = TEST_DATA['user']['id'] - ba = GET('/group?group=builtin_administrators').json() - assert len(ba) != 0 - - userinfo = GET(f'/user/id/{smbuser_id}').json() - groups = userinfo['groups'] - groups.append(ba[0]['id']) - - payload = {'groups': groups} - results = PUT(f"/user/id/{smbuser_id}/", payload) - assert results.status_code == 200, f"res: {results.text}, payload: {payload}" - - -@pytest.mark.parametrize('proto', ["SMB2"]) -def test_090_test_auto_smb_quota(request, proto): - """ - Since the share is configured wtih ixnas:base_user_quota parameter, - the first SMB tree connect should have set a ZFS user quota on the - underlying dataset. Test querying through the SMB protocol. - - Currently SMB1 protocol is disabled because of hard-coded check in - source3/smbd/nttrans.c to only allow root to get/set quotas. 
- """ - depends(request, ["BA_ADDED_TO_USER"]) - c = SMB() - qt = c.get_quota( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=(proto == "SMB1") - ) - - # There should only be one quota entry - assert len(qt) == 1, qt - - # username is prefixed with server netbios name "SERVER\user" - assert qt[0]['user'].endswith(SMB_USER), qt - - # Hard and Soft limits should be set to value above (1GiB) - assert qt[0]['soft_limit'] == (2 ** 30), qt - assert qt[0]['hard_limit'] == (2 ** 30), qt - - -def test_091_remove_auto_quota_param(request): - depends(request, ["SMB_SHARE_CREATED"]) - call('sharing.smb.update', TEST_DATA['share']['id'], { - 'auxsmbconf': '' - }) - - -@pytest.mark.parametrize('proto', ["SMB2"]) -def test_092_set_smb_quota(request, proto): - """ - This test checks our ability to set a ZFS quota - through the SMB protocol by first setting a 2 GiB - quota, then reading it through the SMB protocol, then - resetting to zero. - """ - depends(request, ["BA_ADDED_TO_USER"]) - new_quota = 2 * (2**30) - c = SMB() - qt = c.set_quota( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - hardlimit=new_quota, - target=SMB_USER, - smb1=(proto == "SMB1") - ) - assert len(qt) == 1, qt - assert qt[0]['user'].endswith(SMB_USER), qt - assert qt[0]['soft_limit'] == new_quota, qt - assert qt[0]['hard_limit'] == new_quota, qt - - qt = c.get_quota( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=(proto == "SMB1") - ) - assert len(qt) == 1, qt - assert qt[0]['user'].endswith(SMB_USER), qt - assert qt[0]['soft_limit'] == new_quota, qt - assert qt[0]['hard_limit'] == new_quota, qt - - qt = c.set_quota( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - hardlimit=-1, - target=SMB_USER, - smb1=(proto == "SMB1") - ) - assert len(qt) == 1, qt - assert qt[0]['user'].endswith(SMB_USER), qt - assert qt[0]['soft_limit'] is None, qt - assert qt[0]['hard_limit'] is None, qt - - qt = c.get_quota( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=(proto == "SMB1") - ) - assert len(qt) == 1, qt - assert qt[0]['user'].endswith(SMB_USER), qt - assert qt[0]['soft_limit'] is None, qt - assert qt[0]['hard_limit'] is None, qt - - -def test_95_strip_quota(request): - """ - This test removes any quota set for the test smb user - """ - depends(request, ["BA_ADDED_TO_USER"]) - call('pool.dataset.set_quota', TEST_DATA['dataset'], [{ - 'quota_type': 'USER', - 'id': SMB_USER, - 'quota_value': 0 - }]) - - -@pytest.mark.dependency(name="AFP_ENABLED") -def test_140_enable_aapl(request): - depends(request, ["SMB_SHARE_CREATED"]) - call('smb.update', {'aapl_extensions': True}) - call('sharing.smb.update', TEST_DATA['share']['id'], { - 'afp': True, - }) - - -@pytest.mark.dependency(name="SSH_XATTR_SET") -@pytest.mark.parametrize('xat', AFPXattr.keys()) -def test_151_set_xattr_via_ssh(request, xat): - """ - Iterate through AFP xattrs and set them on testfile - via SSH. 
- """ - depends(request, ["AFP_ENABLED"], scope="session") - smb_path = TEST_DATA['share']['path'] - afptestfile = f'{smb_path}/afp_xattr_testfile' - cmd = f'touch {afptestfile} && chown {SMB_USER} {afptestfile} && ' - cmd += f'echo -n \"{AFPXattr[xat]["text"]}\" | base64 -d | ' - cmd += f'attr -q -s {xat} {afptestfile}' - - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, {"cmd": cmd, "res": results['output']} - - -@pytest.mark.dependency(name="XATTR_CHECK_SMB_READ") -@pytest.mark.parametrize('xat', AFPXattr.keys()) -def test_152_check_xattr_via_smb(request, mount_share, xat): - """ - Read xattr that was written via SSH and verify that - data is same when viewed over SMB. - """ - depends(request, ["SSH_XATTR_SET"]) - afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}' - bytes_to_read = AFPXattr[xat]["smb_bytes"] if xat == "org.netatalk.Metadata" else AFPXattr[xat]["bytes"] - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False) - fd = c.create_file(afptestfile, "w") - xat_bytes = c.read(fd, 0, len(bytes_to_read) + 1) - c.close(fd) - c.disconnect() - - err = { - "name": xat, - "b64data": b64encode(xat_bytes) - } - - # Python base64 library appends a `\t` to end of byte string - assert xat_bytes == bytes_to_read, str(err) - - # Check via kernel client. - kcontent = get_stream('afp_xattr_testfile', AFPXattr[xat]['smbname']) - err = { - "name": xat, - "b64data": b64encode(kcontent[:-1]) - } - assert kcontent[:-1] == bytes_to_read, str(err) - - -@pytest.mark.dependency(name="XATTR_CHECK_SMB_UNLINK") -@pytest.mark.parametrize('xat', AFPXattr.keys()) -def test_153_unlink_xattr_via_smb(request, xat): - """ - Open AFP xattr, set "delete on close" flag, then close. - """ - depends(request, ["XATTR_CHECK_SMB_READ"]) - afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}' - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False) - fd = c.create_file(afptestfile, "w") - c.close(fd, True) - c.disconnect() - - -@pytest.mark.dependency(name="XATTR_CHECK_SMB_WRITE") -@pytest.mark.parametrize('xat', AFPXattr.keys()) -def test_154_write_afp_xattr_via_smb(request, xat): - """ - Write xattr over SMB - """ - depends(request, ["XATTR_CHECK_SMB_UNLINK"]) - afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}' - payload = AFPXattr[xat]["smb_bytes"] if xat == "org.netatalk.Metadata" else AFPXattr[xat]["bytes"] - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False) - fd = c.create_file(afptestfile, "w") - c.write(fd, payload) - c.close(fd) - c.disconnect() - - -@pytest.mark.parametrize('xat', AFPXattr.keys()) -def test_155_ssh_read_afp_xattr(request, xat): - """ - Read xattr that was set via SMB protocol directly via - SSH and verify that data is the same. - """ - depends(request, ["XATTR_CHECK_SMB_WRITE"], scope="session") - # Netatalk-compatible xattr gets additional - # metadata written to it, which makes comparison - # of all bytes problematic. 
- if xat == "org.netatalk.Metadata": - return - - smb_path = TEST_DATA['share']['path'] - afptestfile = f'{smb_path}/afp_xattr_testfile' - cmd = f'attr -q -g {xat} {afptestfile} | base64' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - xat_data = b64decode(results['stdout']) - assert AFPXattr[xat]['bytes'] == xat_data, results['output'] - - -def test_175_check_external_path(request): - with smb_share(f'EXTERNAL:{truenas_server.ip}\\{SMB_NAME}', 'EXTERNAL'): - with smb_connection( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=False - ) as c: - fd = c.create_file('external_test_file', "w") - c.write(fd, b'EXTERNAL_TEST') - c.close(fd) - - cmd = f'smbclient //127.0.0.1/EXTERNAL -U {SMB_USER}%{SMB_PWD} ' - cmd += '-c "get external_test_file"' - ssh(cmd) - - results = SSH_TEST('cat external_test_file', user, password) - assert results['result'] is True, results['output'] - assert results['stdout'] == 'EXTERNAL_TEST' - - -def test_176_check_dataset_auto_create(request): - with dataset('smb_proto_nested_datasets', data={'share_type': 'SMB'}) as ds: - ds_mp = os.path.join('/mnt', ds) - with smb_share(ds_mp, 'DATASETS', {'purpose': 'PRIVATE_DATASETS'}): - with smb_connection( - share='DATASETS', - username=SMB_USER, - password=SMB_PWD, - smb1=False - ) as c: - fd = c.create_file('nested_test_file', "w") - c.write(fd, b'EXTERNAL_TEST') - c.close(fd) - - acl = call('filesystem.getacl', os.path.join(ds_mp, SMB_USER), True) - assert acl['trivial'] is False, str(acl) - - -def test_180_create_share_multiple_dirs_deep(request): - with dataset('nested_dirs', data={'share_type': 'SMB'}) as ds: - dirs_path = os.path.join('/mnt', ds, 'd1/d2/d3') - ssh(f'mkdir -p {dirs_path}') - - with smb_share(dirs_path, 'DIRS'): - with smb_connection( - share='DIRS', - username=SMB_USER, - password=SMB_PWD, - smb1=False - ) as c: - fd = c.create_file('nested_dirs_file', "w") - c.write(fd, b'DIRS_TEST') - c.close(fd) - - call('filesystem.stat', os.path.join(dirs_path, 'nested_dirs_file')) - - -def test_181_create_and_disable_share(request): - with dataset('smb_disabled', data={'share_type': 'SMB'}) as ds: - with smb_share(os.path.join('/mnt', ds), 'TO_DISABLE') as tmp_share: - with smb_connection( - share='TO_DISABLE', - username=SMB_USER, - password=SMB_PWD, - smb1=False - ) as c: - call('sharing.smb.update', tmp_share['id'], {'enabled': False}) - try: - c.create_file('canary', "w") - except NTSTATUSError as status: - assert status.args[0] == ntstatus.NT_STATUS_NETWORK_NAME_DELETED, str(status) - else: - assert c.connected is True diff --git a/tests/api2/test_426_smb_vss.py b/tests/api2/test_426_smb_vss.py deleted file mode 100644 index cbc7a431606a3..0000000000000 --- a/tests/api2/test_426_smb_vss.py +++ /dev/null @@ -1,361 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -from subprocess import run -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import PUT, POST, GET, DELETE, SSH_TEST, wait_on_job -from auto_config import ( - pool_name, - user, - password, -) -from pytest_dependency import depends -from protocols import SMB -from samba import ntstatus -from middlewared.test.integration.utils.client import truenas_server - - -dataset = f"{pool_name}/smb-vss" -dataset_url = dataset.replace('/', '%2F') -dataset_nested = f"{dataset}/sub1" -dataset_nested_url = dataset_nested.replace('/', '%2F') - -SMB_NAME = "SMBVSS" -smb_path = "/mnt/" + dataset - -SMB_USER = "smbshadowuser" -SMB_PWD = 
"smb1234" - -to_check = [ - 'testfile1', - f'{SMB_USER}/testfile2', - 'sub1/testfile3' -] - -snapshots = { - 'snapshot1': {'gmt_string': '', 'offset': 18}, - 'snapshot2': {'gmt_string': '', 'offset': 36}, - 'snapshot3': {'gmt_string': '', 'offset': 54}, -} - - -def check_previous_version_exists(path, home=False): - ip = truenas_server.ip - cmd = [ - 'smbclient', - f'//{ip}/{SMB_NAME if not home else SMB_USER}', - '-U', f'{SMB_USER}%{SMB_PWD}', - '-c' f'open {path}' - ] - cli_open = run(cmd, capture_output=True) - if cli_open.returncode != 0: - return ( - ntstatus.NT_STATUS_FAIL_CHECK, - 'NT_STATUS_FAIL_CHECK', - cli_open.stderr.decode() - ) - - cli_output = cli_open.stdout.decode().strip() - if 'NT_STATUS_' not in cli_output: - return (0, 'NT_STATUS_OK', cli_output) - - cli_output = cli_output.rsplit(' ', 1) - - return ( - ntstatus.__getattribute__(cli_output[1]), - cli_output[1], - cli_output[0] - ) - - -""" -def check_previous_version_contents(path, contents, offset): - cmd = [ - 'smbclient', - f'//{ip}/{SMB_NAME}', - '-U', f'{SMB_USER}%{SMB_PWD}', - '-c' f'prompt OFF; mget {path}' - ] - cli_get = run(cmd, capture_output=True) - if cli_get.returncode != 0: - return ( - ntstatus.NT_STATUS_FAIL_CHECK, - 'NT_STATUS_FAIL_CHECK', - cli_open.stderr.decode() - ) - - cli_output = cli_get.stdout.decode().strip() - if 'NT_STATUS_' in cli_output: - cli_output = cli_output.rsplit(' ', 1) - return ( - ntstatus.__getattribute__(cli_output[1]), - cli_output[0] - ) - - with open(path[25:], "rb") as f: - bytes = f.read() - - to_check = bytes[offset:] - assert len(to_check) == 9, f'path: {path}, contents: {to_check.decode()}' - os.unlink(path[25:]) - assert to_check.decode() == contents, path - return (0, ) -""" - - -@pytest.mark.parametrize('ds', [dataset, dataset_nested]) -@pytest.mark.dependency(name="VSS_DATASET_CREATED") -def test_001_creating_smb_dataset(request, ds): - payload = { - "name": ds, - "share_type": "SMB" - } - results = POST("/pool/dataset/", payload) - assert results.status_code == 200, results.text - result = POST("/zfs/snapshot/", { - "dataset": ds, - "name": "init", - }) - assert result.status_code == 200, results.text - - -@pytest.mark.dependency(name="VSS_USER_CREATED") -def test_002_creating_shareuser_to_test_acls(request): - depends(request, ['VSS_DATASET_CREATED']) - - global vssuser_id - global next_uid - results = GET('/user/get_next_uid/') - assert results.status_code == 200, results.text - next_uid = results.json() - - payload = { - "username": SMB_USER, - "full_name": "SMB User", - "group_create": True, - "password": SMB_PWD, - "uid": next_uid, - } - results = POST("/user/", payload) - assert results.status_code == 200, results.text - vssuser_id = results.json() - - -def test_003_changing_dataset_owner(request): - depends(request, ["VSS_USER_CREATED"]) - payload = { - 'path': smb_path, - 'uid': next_uid, - 'options': {'recursive': True, 'traverse': True}, - } - results = POST('/filesystem/chown/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 180) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -@pytest.mark.dependency(name="VSS_SHARE_CREATED") -def test_004_creating_a_smb_share_path(request): - depends(request, ["VSS_DATASET_CREATED"], scope="session") - global payload, results, smb_id - payload = { - "comment": "SMB VSS Testing Share", - "path": smb_path, - "name": SMB_NAME, - "purpose": "NO_PRESET", - } - results = POST("/sharing/smb/", payload) - assert 
results.status_code == 200, results.text - smb_id = results.json()['id'] - - cmd = f'mkdir {smb_path}/{SMB_USER}; zpool sync; net cache flush' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, {"cmd": cmd, "res": results['output']} - - -@pytest.mark.dependency(name="VSS_SMB_SERVICE_STARTED") -def test_005_starting_cifs_service(request): - depends(request, ["VSS_SHARE_CREATED"]) - payload = {"service": "cifs"} - results = POST("/service/start/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(name="VSS_SMB1_ENABLED") -def test_006_enable_smb1(request): - depends(request, ["VSS_SHARE_CREATED"]) - payload = { - "enable_smb1": True, - } - results = PUT("/smb/", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.dependency(name="SHARE_HAS_SHADOW_COPIES") -@pytest.mark.parametrize('proto', ["SMB1", "SMB2"]) -def test_007_check_shadow_copies(request, proto): - """ - This is very basic validation of presence of snapshot - over SMB1 and SMB2/3. - """ - depends(request, ["VSS_USER_CREATED"]) - c = SMB() - snaps = c.get_shadow_copies( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=(proto == "SMB1") - ) - assert len(snaps) == 1, snaps - - -@pytest.mark.dependency(name="VSS_TESTFILES_CREATED") -@pytest.mark.parametrize('payload', [ - 'snapshot1', 'snapshot2', 'snapshot3' -]) -def test_008_set_up_testfiles(request, payload): - depends(request, ["SHARE_HAS_SHADOW_COPIES"]) - i = int(payload[-1]) - offset = i * 2 * len(payload) - c = SMB() - c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False) - - for f in to_check: - fd = c.create_file(f, "w") - c.write(fd, payload.encode(), offset) - c.close(fd) - - fd = c.create_file(f'{f}:smb2_stream', 'w') - c.write(fd, payload.encode(), offset) - c.close(fd) - - sleep(5) - result = POST("/zfs/snapshot/", { - "dataset": dataset, - "name": payload, - "recursive": True, - }) - assert result.status_code == 200, results.text - - -@pytest.mark.parametrize('proto', ["SMB1", "SMB2"]) -def test_009_check_shadow_copies_count_after_setup(request, proto): - """ - This is very basic validation of presence of snapshot - over SMB1 and SMB2/3. - """ - depends(request, ["VSS_USER_CREATED"]) - c = SMB() - snaps = c.get_shadow_copies( - share=SMB_NAME, - username=SMB_USER, - password=SMB_PWD, - smb1=(proto == "SMB1") - ) - assert len(snaps) == 4, snaps - snaps.sort() - for idx, gmt in enumerate(snaps[1:]): - snapshots[f'snapshot{idx + 1}']['gmt_string'] = gmt - - -@pytest.mark.dependency(name="VSS_TESTFILES_VALIDATED") -@pytest.mark.parametrize('zfs, gmt_data', snapshots.items()) -def test_010_check_previous_versions_of_testfiles(request, zfs, gmt_data): - """ - This test verifies that previous versions of files can be opened successfully - in the following situations: - 1) root of share - 2) subdirectory in share - 3) child dataset in share - - in (1) - (3) above, ability to open alternate data streams is also verified. - """ - depends(request, ["VSS_TESTFILES_CREATED"]) - - vers = gmt_data['gmt_string'] - for f in to_check: - the_file = f'{vers}/{f}' - err, errstr, msg = check_previous_version_exists(the_file) - assert err == 0, f'{the_file}: {errstr} - {msg}' - - """ - # further development of libsmb / smbclient required for this test - # best bet is to add a kwarg to py-libsmb create to allow openinging - # previous version of file. 
- err, msg = check_previous_version_contents(the_file, zfs, gmt_data['offset']) - assert err == 0, f'{the_file}: {msg}' - """ - err, errstr, msg = check_previous_version_exists(f'{the_file}:smb2_stream') - assert err == 0, f'{the_file}:smb2_stream: {errstr} - {msg}' - - -def test_011_convert_to_home_share(request): - depends(request, ["VSS_TESTFILES_VALIDATED"]) - payload = { - "home": True, - } - results = PUT(f"/sharing/smb/id/{smb_id}", payload) - assert results.status_code == 200, results.text - - -@pytest.mark.parametrize('zfs, gmt_data', snapshots.items()) -def test_012_check_previous_versions_of_testfiles_home_share(request, zfs, gmt_data): - """ - This test verifies that previous versions of files can be opened successfully - in the following situations: - 1) root of share - 2) subdirectory in share - 3) child dataset in share - - in (1) - (3) above, ability to open alternate data streams is also verified. - Differs from previous test in that this one covers a "home" share, which is - of a directory inside a ZFS dataset, which means that internally samba cwd - has to change to path outside of share root. - """ - depends(request, ["VSS_TESTFILES_VALIDATED"]) - the_file = f'{gmt_data["gmt_string"]}/testfile2' - err, errstr, msg = check_previous_version_exists(the_file, True) - assert err == 0, f'{the_file}: {errstr} - {msg}' - - -def test_050_delete_smb_user(request): - depends(request, ["VSS_USER_CREATED"]) - results = DELETE(f"/user/id/{vssuser_id}/", {"delete_group": True}) - assert results.status_code == 200, results.text - - results = DELETE(f"/sharing/smb/id/{smb_id}") - assert results.status_code == 200, results.text - -def test_051_disable_smb1(request): - depends(request, ["VSS_SMB1_ENABLED"]) - payload = { - "enable_smb1": False, - "aapl_extensions": False, - } - results = PUT("/smb/", payload) - assert results.status_code == 200, results.text - - -def test_052_stopping_smb_service(request): - depends(request, ["VSS_SMB_SERVICE_STARTED"]) - payload = {"service": "cifs"} - results = POST("/service/stop/", payload) - assert results.status_code == 200, results.text - sleep(1) - - -def test_053_checking_if_smb_is_stoped(request): - depends(request, ["VSS_SMB_SERVICE_STARTED"]) - results = GET("/service?service=cifs") - assert results.json()[0]['state'] == "STOPPED", results.text - - -def test_054_destroying_smb_dataset(request): - depends(request, ["VSS_DATASET_CREATED"]) - results = DELETE(f"/pool/dataset/id/{dataset_url}/", {'recursive': True}) - assert results.status_code == 200, results.text diff --git a/tests/api2/test_427_smb_acl.py b/tests/api2/test_427_smb_acl.py deleted file mode 100644 index 0e6e82e2ee238..0000000000000 --- a/tests/api2/test_427_smb_acl.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 - -import errno -import pytest -import sys -import os -import secrets -import string -import subprocess -apifolder = os.getcwd() -sys.path.append(apifolder) -from auto_config import ( - pool_name, -) -from middlewared.service_exception import ValidationError, ValidationErrors -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.client import truenas_server -from middlewared.test.integration.utils.unittest import RegexString -from protocols import SMB -from pytest_dependency import depends -from time import sleep -from utils import 
create_dataset - - -SMB_USER = "smbacluser" -SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -TEST_DATA = {} -OWNER_RIGHTS_SID = 'S-1-3-4' - - -permset = { - "READ_DATA": False, - "WRITE_DATA": False, - "APPEND_DATA": False, - "READ_NAMED_ATTRS": False, - "WRITE_NAMED_ATTRS": False, - "EXECUTE": False, - "DELETE_CHILD": False, - "READ_ATTRIBUTES": False, - "WRITE_ATTRIBUTES": False, - "DELETE": False, - "READ_ACL": False, - "WRITE_ACL": False, - "WRITE_OWNER": False, - "SYNCHRONIZE": True -} - -flagset = { - "FILE_INHERIT": False, - "DIRECTORY_INHERIT": False, - "INHERIT_ONLY": False, - "NO_PROPAGATE_INHERIT": False, - "INHERITED": False -} - - -def get_windows_sd(share, format="LOCAL"): - return call("smb.get_remote_acl", { - "server": "127.0.0.1", - "share": share, - "username": SMB_USER, - "password": SMB_PWD, - "options": {"output_format": format} - })['acl_data'] - - -def iter_permset(path, share, local_acl): - smbacl = get_windows_sd(share) - assert smbacl['acl'][0]['perms'] == permset - for perm in permset.keys(): - permset[perm] = True - call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True) - smbacl = get_windows_sd(share) - for ace in smbacl["acl"]: - if ace["id"] != 666: - continue - - assert ace["perms"] == permset, f'{perm}: {str(ace)}' - - -def iter_flagset(path, share, local_acl): - smbacl = get_windows_sd(share) - assert smbacl['acl'][0]['flags'] == flagset - for flag in flagset.keys(): - # we automatically canonicalize entries and so INHERITED shifts to end of list - flagset[flag] = True - call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True) - smbacl = get_windows_sd(share) - for ace in smbacl["acl"]: - if ace["id"] != 666: - continue - - assert ace["flags"] == flagset, f'{flag}: {str(ace)}' - - -@pytest.fixture(scope='module') -def initialize_for_smb_tests(request): - ba = call( - 'group.query', - [['name', '=', 'builtin_administrators']], - {'get': True} - ) - with user({ - 'username': SMB_USER, - 'full_name': SMB_USER, - 'group_create': True, - 'smb': True, - 'groups': [ba['id']], - 'password': SMB_PWD - }) as u: - try: - call('service.start', 'cifs') - yield {'user': u} - finally: - call('service.stop', 'cifs') - - -@pytest.mark.dependency(name="SMB_SERVICE_STARTED") -def test_001_initialize_for_tests(initialize_for_smb_tests): - TEST_DATA.update(initialize_for_smb_tests) - - -def test_003_test_perms(request): - """ - This test creates a temporary dataset / SMB share, - then iterates through all the possible permissions bits - setting local FS ace for each of them and verifies that - correct NT ACL bit gets toggled when viewed through SMB - protocol. - """ - depends(request, ["SMB_SERVICE_STARTED"], scope="session") - - with dataset('nfs4acl_perms_smb', {'share_type': 'SMB'}) as ds: - path = os.path.join('/mnt', ds) - with smb_share(path, "PERMS"): - the_acl = call('filesystem.getacl', path, False)['acl'] - the_acl.insert(0, { - 'perms': permset, - 'flags': flagset, - 'id': 666, - 'type': 'ALLOW', - 'tag': 'USER' - }) - call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True) - iter_permset(path, "PERMS", the_acl) - - -def test_004_test_flags(request): - """ - This test creates a temporary dataset / SMB share, - then iterates through all the possible inheritance flags - setting local FS ace for each of them and verifies that - correct NT ACL bit gets toggled when viewed through SMB - protocol. 
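-
-    For orientation, the rough correspondence between these NFSv4 ACE flags and the
-    NT security descriptor inheritance bits is (a sketch from general NFSv4/NT ACL
-    semantics, not something this test asserts directly):
-
-        FILE_INHERIT         -> OBJECT_INHERIT
-        DIRECTORY_INHERIT    -> CONTAINER_INHERIT
-        INHERIT_ONLY         -> INHERIT_ONLY
-        NO_PROPAGATE_INHERIT -> NO_PROPAGATE_INHERIT
-        INHERITED            -> INHERITED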
- """ - depends(request, ["SMB_SERVICE_STARTED"], scope="session") - - with dataset('nfs4acl_flags_smb', {'share_type': 'SMB'}) as ds: - path = os.path.join('/mnt', ds) - with smb_share(path, "FLAGS"): - the_acl = call('filesystem.getacl', path, False)['acl'] - the_acl.insert(0, { - 'perms': permset, - 'flags': flagset, - 'id': 666, - 'type': 'ALLOW', - 'tag': 'USER' - }) - call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True) - iter_flagset(path, "FLAGS", the_acl) - - -def test_005_test_map_modify(request): - """ - This test validates that we are generating an appropriate SD when user has - 'stripped' an ACL from an SMB share. Appropriate in this case means one that - grants an access mask equaivalent to MODIFY or FULL depending on whether it's - the file owner or group / other. - """ - depends(request, ["SMB_SERVICE_STARTED"], scope="session") - - ds = 'nfs4acl_map_modify' - path = f'/mnt/{pool_name}/{ds}' - with create_dataset(f'{pool_name}/{ds}', {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'}, None, 777): - with smb_share(path, "MAP_MODIFY"): - sd = get_windows_sd("MAP_MODIFY", "SMB") - dacl = sd['dacl'] - assert dacl[0]['access_mask']['standard'] == 'FULL', str(dacl[0]) - assert dacl[1]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[1]) - assert dacl[1]['access_mask']['special']['WRITE_EA'], str(dacl[1]) - assert dacl[2]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[2]) - assert dacl[2]['access_mask']['special']['WRITE_EA'], str(dacl[2]) - - -def test_006_test_preserve_dynamic_id_mapping(request): - depends(request, ["SMB_SERVICE_STARTED"], scope="session") - - def _find_owner_rights(acl, owner_rights_id): - for entry in acl: - if entry['id'] == owner_rights_id: - return True - - return False - - ds = 'nfs4acl_dynmamic_user' - path = f'/mnt/{pool_name}/{ds}' - with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}): - with smb_share(path, "DYNAMIC"): - # add an ACL entry that forces generation - # of a dynamic idmap entry - sleep(5) - cmd = [ - 'smbcacls', - f'//{truenas_server.ip}/DYNAMIC', - '\\', - '-a', r'ACL:S-1-3-4:ALLOWED/0x0/FULL', - '-d', '0', - '-U', f'{SMB_USER}%{SMB_PWD}', - ] - res = subprocess.run(cmd, capture_output=True) - assert res.returncode == 0, res.stderr.decode() or res.stdout.decode() - - # Since winbindd is by default not in nsswitch when we're standalone - # the GID won't resolve to name - res = call('idmap.convert_sids', [OWNER_RIGHTS_SID]) - assert OWNER_RIGHTS_SID in res['mapped'] - assert res['mapped'][OWNER_RIGHTS_SID]['id_type'] == 'GROUP' - assert res['mapped'][OWNER_RIGHTS_SID]['name'].endswith('Owner Rights') - owner_rights_id = res['mapped'][OWNER_RIGHTS_SID]['id'] - - # verify "owner rights" entry is present - # verify "owner rights" entry is still present - the_acl = call('filesystem.getacl', path, False, True)['acl'] - has_owner_rights = _find_owner_rights(the_acl, owner_rights_id) - assert has_owner_rights is True, str(the_acl) - - # force re-sync of group mapping database (and winbindd_idmap.tdb) - call('smb.synchronize_group_mappings', job=True) - - # verify "owner rights" entry is still present - the_acl = call('filesystem.getacl', path, False, True)['acl'] - has_owner_rights = _find_owner_rights(the_acl, owner_rights_id) - assert has_owner_rights is True, str(the_acl) - - -def test_007_test_disable_autoinherit(request): - depends(request, ["SMB_SERVICE_STARTED"], scope="session") - ds = 'nfs4acl_disable_inherit' - path = f'/mnt/{pool_name}/{ds}' - with create_dataset(f'{pool_name}/{ds}', 
{'share_type': 'SMB'}): - with smb_share(path, 'NFS4_INHERIT'): - c = SMB() - c.connect(share='NFS4_INHERIT', username=SMB_USER, password=SMB_PWD, smb1=False) - c.mkdir('foo') - sd = c.get_sd('foo') - assert 'SEC_DESC_DACL_PROTECTED' not in sd['control']['parsed'], str(sd) - c.inherit_acl('foo', 'COPY') - sd = c.get_sd('foo') - assert 'SEC_DESC_DACL_PROTECTED' in sd['control']['parsed'], str(sd) - c.disconnect() - - -def test_008_test_prevent_smb_dataset_update(request): - """ - Prevent changing acltype and xattr on dataset hosting SMB shares - """ - ds_name = 'prevent_changes' - path = f'/mnt/{pool_name}/{ds_name}' - with create_dataset(f'{pool_name}/{ds_name}') as ds: - with smb_share(path, 'SMB_SHARE_1'): - # Create a second share for testing purposes - with smb_share(path, 'SMB_SHARE_2'): - - # Confirm we ignore requests that don't involve changes - for setting in [{"xattr": "SA"}, {"acltype": "POSIX"}]: - call('pool.dataset.update', ds, setting) - - # Confirm we block requests that involve changes - for setting in [{"xattr": "ON"}, {"acltype": "OFF"}]: - attrib = list(setting.keys())[0] - with pytest.raises(ValidationErrors) as ve: - call('pool.dataset.update', ds, setting) - assert ve.value.errors == [ - ValidationError( - f"pool_dataset_update.{attrib}", - RegexString("This dataset is hosting SMB shares. .*"), - errno.EINVAL, - ) - ] - assert "SMB_SHARE_2" in str(ve.value.errors[0]), ve.value.errors[0] diff --git a/tests/api2/test_428_smb_rpc.py b/tests/api2/test_428_smb_rpc.py deleted file mode 100644 index 51fa42fad5e1b..0000000000000 --- a/tests/api2/test_428_smb_rpc.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from protocols import MS_RPC - - -SMB_USER = "smbrpcuser" -SMB_PWD = "smb1234#!@" -INVALID_SHARE_NAME_CHARACTERS = {'%', '<', '>', '*', '?', '|', '/', '\\', '+', '=', ';', ':', '"', ',', '[', ']'} - -@pytest.fixture(scope="module") -def setup_smb_share(request): - with dataset('rpc_test', data={'share_type': 'SMB'}) as ds: - with smb_share(os.path.join('/mnt', ds), "RPC_TEST", {"abe": True, "purpose": "NO_PRESET"}) as s: - yield {'dataset': ds, 'share': s} - -@pytest.fixture(autouse=True, scope="function") -def setup_smb_user(request): - with user({ - "username": SMB_USER, - "full_name": SMB_USER, - "group_create": True, - "home": "/var/empty", - "password": SMB_PWD, - }) as u: - yield u - - -def test_001_net_share_enum(setup_smb_user, setup_smb_share): - path = setup_smb_share['share']['path'] - share_name = setup_smb_share['share']['name'] - - with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl: - shares = hdl.shares() - # IPC$ share should always be present - assert len(shares) == 2, str(shares) - assert shares[0]['netname'] == 'IPC$' - assert shares[0]['path'] == 'C:\\tmp' - assert shares[1]['netname'] == share_name - assert shares[1]['path'].replace('\\', '/')[2:] == path - - -def test_002_enum_users(setup_smb_user, setup_smb_share): - results = GET('/user', payload={ - 'query-filters': [['username', '=', SMB_USER]], - 'query-options': { - 'get': True, - 'extra': {'additional_information': ['SMB']} - } - }) - assert 
results.status_code == 200, results.text - user_info = results.json() - - with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl: - entry = None - users = hdl.users() - for u in users: - if u['user'] != SMB_USER: - continue - - entry = u - break - - assert entry is not None, str(users) - rid = int(user_info['sid'].rsplit('-', 1)[1]) - assert rid == entry['rid'], str(entry) - - -def test_003_access_based_share_enum(setup_smb_user, setup_smb_share): - payload = { - 'share_name': "RPC_TEST", - 'share_acl': [ - { - 'ae_who_sid': 'S-1-5-32-544', - 'ae_perm': 'FULL', - 'ae_type': 'ALLOWED' - } - ] - } - results = POST("/sharing/smb/setacl", payload) - assert results.status_code == 200, results.text - - results = GET("/sharing/smb") - assert results.status_code == 200, results.text - - with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl: - shares = hdl.shares() - assert len(shares) == 1, str({"enum": shares, "shares": results.json()}) - - -def test_share_name_restricutions(setup_smb_share): - first_share = setup_smb_share['share'] - ds_name = setup_smb_share['dataset'] - - for char in INVALID_SHARE_NAME_CHARACTERS: - # First try updating existing share's name - with pytest.raises(ValidationErrors) as ve: - call('sharing.smb.update', first_share['id'], {'name': f'CANARY{char}'}) - - assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg - - # Now try creating new share - with pytest.raises(ValidationErrors) as ve: - call('sharing.smb.create', {'path': os.path.join('/mnt', ds_name), 'name': f'CANARY{char}'}) - - assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg - - - with pytest.raises(ValidationErrors) as ve: - call('sharing.smb.update', first_share['id'], {'name': 'CANARY\x85'}) - - assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg - - with pytest.raises(ValidationErrors) as ve: - call('sharing.smb.create', {'path': os.path.join('/mnt', ds_name), 'name': 'CANARY\x85'}) - - assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg diff --git a/tests/api2/test_430_smb_sharesec.py b/tests/api2/test_430_smb_sharesec.py deleted file mode 100644 index e8db0a8bde19d..0000000000000 --- a/tests/api2/test_430_smb_sharesec.py +++ /dev/null @@ -1,218 +0,0 @@ -import pytest -import sys -import os -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from middlewared.test.integration.assets.account import user as create_user -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.utils import call, client -from functions import PUT, POST, GET, DELETE, SSH_TEST -from auto_config import user, password - -Guests = { - "domain": "BUILTIN", - "name": "Guests", - "sidtype": "ALIAS" -} -Admins = { - "domain": "BUILTIN", - "name": "Administrators", - "sidtype": "ALIAS" -} -Users = { - "domain": "BUILTIN", - "name": "Users", - "sidtype": "ALIAS" -} - - -@pytest.fixture(scope="module") -def setup_smb_share(request): - global share_info - with dataset( - "smb-sharesec", - {'share_type': 'SMB'}, - ) as ds: - with smb_share(f'/mnt/{ds}', "my_sharesec") as share: - share_info = share - yield share - - -@pytest.fixture(scope="module") -def sharesec_user(): - with create_user({ - 'username': 'sharesec_user', - 'full_name': 'sharesec_user', - 'smb': True, - 'group_create': True, - 'password': 'test1234', - }) as u: - yield u - - 
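-
-# The tests below exercise the share-ACL round-trip. As a minimal sketch of the two
-# endpoints involved (shapes taken from the calls used later in this module; the
-# share name and ACE values are illustrative only):
-#
-#     acl = POST('/sharing/smb/getacl/', {'share_name': 'my_sharesec'}).json()
-#     POST('/sharing/smb/setacl', {
-#         'share_name': 'my_sharesec',
-#         'share_acl': [
-#             {'ae_who_sid': 'S-1-5-32-545', 'ae_perm': 'FULL', 'ae_type': 'ALLOWED'}
-#         ],
-#     })
-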
-@pytest.mark.dependency(name="sharesec_initialized") -def test_02_initialize_share(setup_smb_share): - results = POST('/sharing/smb/getacl/', { - 'share_name': share_info['name'] - }) - assert results.status_code == 200, results.text - assert results.json()['share_name'].casefold() == share_info['name'].casefold() - assert len(results.json()['share_acl']) == 1 - - -def test_06_set_smb_acl_by_sid(request): - depends(request, ["sharesec_initialized"], scope="session") - payload = { - 'share_name': share_info['name'], - 'share_acl': [ - { - 'ae_who_sid': 'S-1-5-32-545', - 'ae_perm': 'FULL', - 'ae_type': 'ALLOWED' - } - ] - } - results = POST("/sharing/smb/setacl", payload) - assert results.status_code == 200, results.text - acl_set = results.json() - - assert payload['share_name'].casefold() == acl_set['share_name'].casefold() - assert payload['share_acl'][0]['ae_who_sid'] == acl_set['share_acl'][0]['ae_who_sid'] - assert payload['share_acl'][0]['ae_perm'] == acl_set['share_acl'][0]['ae_perm'] - assert payload['share_acl'][0]['ae_type'] == acl_set['share_acl'][0]['ae_type'] - assert acl_set['share_acl'][0]['ae_who_id']['id_type'] == 'GROUP' - - b64acl = call( - 'datastore.query', 'sharing.cifs.share', - [['cifs_name', '=', share_info['name']]], - {'get': True} - )['cifs_share_acl'] - - assert b64acl != "" - - call('smb.sharesec.synchronize_acls') - - newb64acl = call( - 'datastore.query', 'sharing.cifs.share', - [['cifs_name', '=', share_info['name']]], - {'get': True} - )['cifs_share_acl'] - - assert newb64acl == b64acl - - -@pytest.mark.dependency(name="sharesec_acl_set") -def test_07_set_smb_acl_by_unix_id(request, sharesec_user): - depends(request, ["sharesec_initialized"], scope="session") - payload = { - 'share_name': share_info['name'], - 'share_acl': [ - { - 'ae_who_id': {'id_type': 'USER', 'id': sharesec_user['uid']}, - 'ae_perm': 'CHANGE', - 'ae_type': 'ALLOWED' - } - ] - } - results = POST("/sharing/smb/setacl", payload) - assert results.status_code == 200, results.text - acl_set = results.json() - - assert payload['share_name'].casefold() == acl_set['share_name'].casefold() - assert payload['share_acl'][0]['ae_perm'] == acl_set['share_acl'][0]['ae_perm'] - assert payload['share_acl'][0]['ae_type'] == acl_set['share_acl'][0]['ae_type'] - assert acl_set['share_acl'][0]['ae_who_id']['id_type'] == 'USER' - assert acl_set['share_acl'][0]['ae_who_id']['id'] == sharesec_user['uid'] - assert acl_set['share_acl'][0]['ae_who_str'] == sharesec_user['username'] - - -def test_24_delete_share_info_tdb(request): - depends(request, ["sharesec_acl_set"], scope="session") - cmd = 'rm /var/db/system/samba4/share_info.tdb' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_25_verify_share_info_tdb_is_deleted(request): - depends(request, ["sharesec_acl_set"], scope="session") - cmd = 'test -f /var/db/system/samba4/share_info.tdb' - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, results['output'] - - results = POST("/sharing/smb/getacl", {'share_name': share_info['name']}) - assert results.status_code == 200, results.text - acl = results.json() - - assert acl['share_name'].casefold() == share_info['name'].casefold() - assert acl['share_acl'][0]['ae_who_sid'] == 'S-1-1-0' - - -def test_27_restore_sharesec_with_flush_share_info(request, sharesec_user): - depends(request, ["sharesec_acl_set"], scope="session") - with client() as c: - c.call('smb.sharesec._flush_share_info') - - results = POST("/sharing/smb/getacl", 
{'share_name': share_info['name']}) - assert results.status_code == 200, results.text - acl = results.json() - - assert acl['share_name'].casefold() == share_info['name'].casefold() - assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'] - - -def test_29_verify_share_info_tdb_is_created(request): - depends(request, ["sharesec_acl_set"], scope="session") - cmd = 'test -f /var/db/system/samba4/share_info.tdb' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -@pytest.mark.dependency(name="sharesec_rename") -def test_30_rename_smb_share_and_verify_share_info_moved(request, sharesec_user): - depends(request, ["sharesec_acl_set"], scope="session") - results = PUT(f"/sharing/smb/id/{share_info['id']}/", - {"name": "my_sharesec2"}) - assert results.status_code == 200, results.text - - results = POST("/sharing/smb/getacl", {'share_name': 'my_sharesec2'}) - assert results.status_code == 200, results.text - acl = results.json() - - share_info['name'] = 'my_sharesec2' - assert acl['share_name'].casefold() == share_info['name'].casefold() - assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'] - - -def test_31_toggle_share_and_verify_acl_preserved(request, sharesec_user): - depends(request, ["sharesec_rename"], scope="session") - - results = PUT(f"/sharing/smb/id/{share_info['id']}/", - {"enabled": False}) - assert results.status_code == 200, results.text - - results = PUT(f"/sharing/smb/id/{share_info['id']}/", - {"enabled": True}) - assert results.status_code == 200, results.text - - results = POST("/sharing/smb/getacl", {'share_name': 'my_sharesec2'}) - assert results.status_code == 200, results.text - acl = results.json() - - assert acl['share_name'].casefold() == share_info['name'].casefold() - assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'] - - # Abusive test, bypass normal APIs for share and - # verify that sync_registry call still preserves info. - call('datastore.update', 'sharing.cifs.share', share_info['id'], {'cifs_enabled': False}) - - call('sharing.smb.sync_registry', job=True) - - call('datastore.update', 'sharing.cifs.share', share_info['id'], {'cifs_enabled': True}) - - call('sharing.smb.sync_registry', job=True) - - acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'}) - assert acl['share_name'].casefold() == share_info['name'].casefold() - assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'] diff --git a/tests/api2/test_435_smb_registry.py b/tests/api2/test_435_smb_registry.py deleted file mode 100644 index bfe6db31184bc..0000000000000 --- a/tests/api2/test_435_smb_registry.py +++ /dev/null @@ -1,415 +0,0 @@ -import contextlib -import os -import pytest - -from middlewared.service_exception import ValidationError -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.utils import call, ssh - -DATASET_NAME = 'smb-reg' -SHARES = [f'REGISTRYTEST_{i}' for i in range(0, 5)] -PRESETS = [ - "DEFAULT_SHARE", - "ENHANCED_TIMEMACHINE", - "MULTI_PROTOCOL_NFS", - "PRIVATE_DATASETS", - "WORM_DROPBOX" -] -DETECTED_PRESETS = None - -""" -Note: following sample auxiliary parameters and comments were -provided by a community member for testing. They do not represent -the opinion or recommendation of iXsystems. 
-""" -SAMPLE_AUX = [ - 'follow symlinks = yes ', - 'veto files = /.windows/.mac/.zfs/', - '# needed explicitly for each share to prevent default being set', - 'admin users = MY_ACCOUNT', - '## NOTES:', '', - "; aio-fork might cause smbd core dump/signal 6 in log in v11.1- see bug report [https://redmine.ixsystems.com/issues/27470]. Looks helpful but disabled until clear if it's responsible.", '', '', - '### VFS OBJECTS (shadow_copy2 not included if no periodic snaps, so do it manually)', '', - '# Include recycle, crossrename, and exclude readonly, as share=RW', '', - '#vfs objects = zfs_space zfsacl winmsa streams_xattr recycle shadow_copy2 crossrename aio_pthread', '', - 'vfs objects = aio_pthread streams_xattr shadow_copy_zfs acl_xattr crossrename winmsa recycle', '', - '# testing without shadow_copy2', '', - 'valid users = MY_ACCOUNT @ALLOWED_USERS', - 'invalid users = root anonymous guest', - 'hide dot files = yes', -] - -SAMPLE_OPTIONS = [ - 'mangled names = no', - 'dos charset = CP850', - 'unix charset = UTF-8', - 'strict sync = no', - '', - 'min protocol = SMB2', - 'vfs objects = fruit streams_xattr ', - 'fruit:model = MacSamba', 'fruit:posix_rename = yes ', - 'fruit:veto_appledouble = no', - 'fruit:wipe_intentionally_left_blank_rfork = yes ', - 'fruit:delete_empty_adfiles = yes ', - '', - 'fruit:locking=none', - 'fruit:metadata=netatalk', - 'fruit:resource=file', - 'streams_xattr:prefix=user.', - 'streams_xattr:store_stream_type=no', - 'strict locking=auto', - '# oplocks=no # breaks Time Machine', - ' level2 oplocks=no', - '# spotlight=yes # invalid without further config' -] - - -@contextlib.contextmanager -def create_smb_share(path, share_name, mkdir=False, options=None): - cr_opts = options or {} - - if mkdir: - call('filesystem.mkdir', path) - - with smb_share(path, share_name, cr_opts) as share: - yield share - - -@contextlib.contextmanager -def setup_smb_shares(mountpoint): - SHARE_DICT = {} - - for share in SHARES: - share_path = os.path.join(mountpoint, share) - call('filesystem.mkdir', share_path) - new_share = call('sharing.smb.create', { - 'comment': 'My Test SMB Share', - 'name': share, - 'home': False, - 'path': share_path, - }) - SHARE_DICT[share] = new_share['id'] - - try: - yield SHARE_DICT - finally: - for share_id in SHARE_DICT.values(): - call('sharing.smb.delete', share_id) - - -@pytest.fixture(scope='module') -def setup_for_tests(): - with dataset(DATASET_NAME, data={'share_type': 'SMB'}) as ds: - smb_registry_mp = os.path.join('/mnt', ds) - call('filesystem.setperm', { - 'path': smb_registry_mp, - 'mode': '777', - 'options': {'stripacl': True, 'recursive': True} - }, job=True) - - with setup_smb_shares(smb_registry_mp) as shares: - yield (smb_registry_mp, ds, shares) - - -@pytest.fixture(scope='module') -def share_presets(): - yield call('sharing.smb.presets') - - -def test__setup_for_tests(setup_for_tests): - reg_shares = call('sharing.smb.reg_listshares') - for share in SHARES: - assert share in reg_shares - - -@pytest.mark.parametrize('smb_share', SHARES) -def test__rename_shares(setup_for_tests, smb_share): - mp, ds, SHARE_DICT = setup_for_tests - - call('sharing.smb.update', SHARE_DICT[smb_share], { - 'name': f'NEW_{smb_share}' - }) - - -def test__renamed_shares_in_registry(setup_for_tests): - """ - Share renames need to be explicitly tested because - it will actually result in share being removed from - registry and re-added with different name. 
- """ - reg_shares = call('sharing.smb.reg_listshares') - for share in SHARES: - assert f'NEW_{share}' in reg_shares - - assert len(reg_shares) == len(SHARES) - - -def check_aux_param(param, share, expected, fruit_enable=False): - val = call('smb.getparm', param, share) - if param == 'vfs objects': - expected_vfs_objects = expected.split() - # We have to override someone's poor life choices and insert - # vfs_fruit so that they don't have mysteriously broken time - # machine shares - if fruit_enable: - expected_vfs_objects.append('fruit') - - assert set(expected_vfs_objects) == set(val) - else: - assert val == expected - - -@pytest.mark.parametrize('preset', PRESETS) -def test__test_presets(setup_for_tests, share_presets, preset): - """ - This test iterates through SMB share presets, - applies them to a single share, and then validates - that the preset was applied correctly. - - In case of bool in API, simple check that appropriate - value is set in return from sharing.smb.update will - be sufficient. In case of auxiliary parameters, we - need to be a bit more thorough. The preset will not - be reflected in returned auxsmbconf and so we'll need - to directly reach out and run smb.getparm. - """ - mp, ds, SHARE_DICT = setup_for_tests - if 'TIMEMACHINE' in preset: - call('smb.update', {'aapl_extensions': True}) - - to_test = share_presets[preset]['params'] - to_test_aux = to_test['auxsmbconf'] - new_conf = call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], { - 'purpose': preset - }) - for entry in to_test_aux.splitlines(): - aux, val = entry.split('=', 1) - check_aux_param(aux.strip(), new_conf['name'], val.strip()) - - for k in to_test.keys(): - if k == "auxsmbconf": - continue - assert to_test[k] == new_conf[k] - - -def test__reset_smb(setup_for_tests): - """ - Remove all parameters that might turn us into - a MacOS-style SMB server (fruit). - """ - mp, ds, SHARE_DICT = setup_for_tests - call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], { - "purpose": "NO_PRESET", - "timemachine": False - }) - call('smb.update', {'aapl_extensions': False}) - - -def test__test_aux_param_on_update(setup_for_tests): - SHARE_DICT = setup_for_tests[2] - share_id = SHARE_DICT['REGISTRYTEST_0'] - share = call('sharing.smb.query', [['id', '=', share_id]], {'get': True}) - - old_aux = share['auxsmbconf'] - results = call('sharing.smb.update', share_id, { - 'auxsmbconf': '\n'.join(SAMPLE_AUX) - }) - new_aux = results['auxsmbconf'] - new_name = results['name'] - ncomments_sent = 0 - ncomments_recv = 0 - - for entry in old_aux.splitlines(): - """ - Verify that aux params from last preset applied - are still in effect. Parameters included in - SAMPLE_AUX will never be in a preset so risk of - collision is minimal. - """ - aux, val = entry.split('=', 1) - check_aux_param(aux.strip(), new_name, val.strip()) - - for entry in new_aux.splitlines(): - """ - Verify that non-comment parameters were successfully - applied to the running configuration. 
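-        (Editor's note) Comment lines, i.e. those starting with '#' or ';',
-        are only counted here; the total is compared further below against
-        the number of comments sent in SAMPLE_AUX to confirm they are not
-        stripped on update.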
- """ - if not entry: - continue - - if entry.startswith(('#', ';')): - ncomments_recv += 1 - continue - - aux, val = entry.split('=', 1) - check_aux_param(aux.strip(), new_name, val.strip()) - - """ - Verify comments aren't being stripped on update - """ - for entry in SAMPLE_AUX: - if entry.startswith(('#', ';')): - ncomments_sent += 1 - - assert ncomments_sent == ncomments_recv, new_aux - - -@contextlib.contextmanager -def setup_aapl_extensions(newvalue): - oldvalue = call('smb.config')['aapl_extensions'] - try: - if oldvalue != newvalue: - call('smb.update', {'aapl_extensions': newvalue}) - yield - finally: - if oldvalue != newvalue: - call('smb.update', {'aapl_extensions': oldvalue}) - - -@pytest.fixture(scope='function') -def setup_tm_share(setup_for_tests): - share_name = 'AUX_CREATE' - path = os.path.join(setup_for_tests[0], share_name) - with setup_aapl_extensions(True): - with create_smb_share(path, share_name, True, { - "home": False, - "purpose": "ENHANCED_TIMEMACHINE", - "auxsmbconf": '\n'.join(SAMPLE_AUX) - }) as s: - yield s - - -def test__test_aux_param_on_create(share_presets, setup_tm_share): - share = setup_tm_share - new_aux = share['auxsmbconf'] - pre_aux = share_presets["ENHANCED_TIMEMACHINE"]["params"]["auxsmbconf"] - ncomments_sent = 0 - ncomments_recv = 0 - - for entry in pre_aux.splitlines(): - """ - Verify that aux params from preset were applied - successfully to the running configuration. - """ - aux, val = entry.split('=', 1) - check_aux_param(aux.strip(), share['name'], val.strip()) - - for entry in new_aux.splitlines(): - """ - Verify that non-comment parameters were successfully - applied to the running configuration. - """ - if not entry: - continue - - if entry.startswith(('#', ';')): - ncomments_recv += 1 - continue - - aux, val = entry.split('=', 1) - check_aux_param(aux.strip(), share['name'], val.strip(), True) - - """ - Verify comments aren't being stripped on update - """ - for entry in SAMPLE_AUX: - if entry.startswith(('#', ';')): - ncomments_sent += 1 - - assert ncomments_sent == ncomments_recv, f'new: {new_aux}, sample: {SAMPLE_AUX}' - - -def test__delete_shares(setup_for_tests): - SHARE_DICT = setup_for_tests[2] - for key in list(SHARE_DICT.keys()): - call('sharing.smb.delete', SHARE_DICT[key]) - SHARE_DICT.pop(key) - - reg_shares = call('sharing.smb.reg_listshares') - assert len(reg_shares) == 0, str(reg_shares) - - share_count = call('sharing.smb.query', [], {'count': True}) - assert share_count == 0 - - -""" -Following battery of tests validate behavior of registry -with regard to homes shares -""" - - -def test__create_homes_share(setup_for_tests): - mp, ds, share_dict = setup_for_tests - home_path = os.path.join(mp, 'HOME_SHARE') - call('filesystem.mkdir', home_path) - - new_share = call('sharing.smb.create', { - "comment": "My Test SMB Share", - "path": home_path, - "home": True, - "purpose": "NO_PRESET", - "name": 'HOME_SHARE', - }) - share_dict['HOME'] = new_share['id'] - - reg_shares = call('sharing.smb.reg_listshares') - assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares) - - -def test__toggle_homes_share(setup_for_tests): - mp, ds, share_dict = setup_for_tests - try: - call('sharing.smb.update', share_dict['HOME'], {'home': False}) - reg_shares = call('sharing.smb.reg_listshares') - assert not any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares) - finally: - call('sharing.smb.update', share_dict['HOME'], {'home': True}) - - reg_shares = call('sharing.smb.reg_listshares') - 
assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-
-
-def test__registry_rebuild_homes(setup_for_tests):
-    """
-    Abusive test.
-    In this test we run behind middleware's back and
-    delete our homes share from the registry, and then
-    attempt to rebuild it via the registry sync method. This
-    method is called (among other places) when the CIFS
-    service reloads.
-    """
-    ssh('net conf delshare HOMES')
-    call('service.reload', 'cifs')
-    reg_shares = call('sharing.smb.reg_listshares')
-    assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-
-
-def test__test_smb_options():
-    """
-    Validate that user comments are preserved as-is
-    """
-    new_config = call('smb.update', {'smb_options': '\n'.join(SAMPLE_OPTIONS)})
-    assert new_config['smb_options'].splitlines() == SAMPLE_OPTIONS
-
-
-def test__test_invalid_share_aux_param_create(setup_for_tests):
-    init_share_count = call('sharing.smb.query', [], {'count': True})
-    with pytest.raises(ValidationError) as ve:
-        call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0], 'auxsmbconf': 'oplocks = canary'})
-
-    assert ve.value.attribute == 'sharingsmb_create.auxsmbconf'
-
-    assert init_share_count == call('sharing.smb.query', [], {'count': True})
-
-
-def test__test_invalid_share_aux_param_update(setup_for_tests):
-    this_share = call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0]})
-
-    try:
-        with pytest.raises(ValidationError) as ve:
-            call('sharing.smb.update', this_share['id'], {'auxsmbconf': 'oplocks = canary'})
-    finally:
-        call('sharing.smb.delete', this_share['id'])
-
-    assert ve.value.attribute == 'sharingsmb_update.auxsmbconf'
diff --git a/tests/api2/test_438_snapshots.py b/tests/api2/test_438_snapshots.py
deleted file mode 100644
index aca2feddd1e49..0000000000000
--- a/tests/api2/test_438_snapshots.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python3
-import os
-import sys
-
-from middlewared.test.integration.assets.pool import dataset, snapshot
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from auto_config import pool_name
-from functions import DELETE, GET, POST, PUT, wait_on_job
-
-
-def _verify_snapshot_keys_present(snap, expected, unexpected):
-    """
-    Verify that the snapshot returned by the query has the expected keys in its dict
-    and none of the unexpected ones.
-
-    :param snap: a dict containing snapshot data
-    :param expected: a list of strings, expected key names in the dict
-    :param unexpected: a list of strings, key names that should not be in the dict
-    """
-    assert set(expected).issubset(set(snap.keys())), f"Failed to get all expected keys: {snap.keys()}"
-    for key in unexpected:
-        assert key not in snap.keys(), f"Unexpectedly, was returned '{key}'"
-
-
-def _verify_snapshot_against_config(snap, dataset_id, snap_config):
-    """
-    Verify that the snapshot returned by the query has data that matches the data
-    returned when the dataset and snapshot were created.
-
-    :param snap: a dict containing snapshot data
-    :param dataset_id: dataset name
-    :param snap_config: a dict containing the snapshot data (when it was created)
-    """
-    assert snap['pool'] == dataset_id.split('/')[0], f"Incorrect pool: {snap}"
-    assert snap['name'] == snap_config['name'], f"Incorrect name: {snap}"
-    assert snap['type'] == "SNAPSHOT", f"Incorrect type: {snap}"
-    assert snap['snapshot_name'] == snap_config['snapshot_name'], f"Incorrect snapshot_name: {snap}"
-    assert snap['dataset'] == dataset_id, f"Incorrect dataset: {snap}"
-    assert snap['id'] == snap_config['id'], f"Incorrect id: {snap}"
-    assert isinstance(snap['createtxg'], str), f"Incorrect type for createtxg: {snap}"
-    assert snap['createtxg'] == snap_config['createtxg'], f"Incorrect createtxg: {snap}"
-
-
-def _verify_snapshot_properties(snap, properties_list):
-    """
-    Verify that the snapshot returned by the query has the expected items in its
-    'properties' value.
-
-    In the case of the 'name' and 'createtxg' properties we perform additional
-    checks, as this data should be present twice in snap.
-
-    :param snap: a dict containing snapshot data
-    :param properties_list: a list of strings, key names of properties that should
-    be present in snap['properties']
-    """
-    for prop in properties_list:
-        assert prop in snap['properties'], f"Missing property: {prop}"
-    # Special checking if name requested
-    if 'name' in properties_list:
-        assert snap['properties']['name']['value'] == snap['name'], f"Name property does not match {snap['properties']['name']}"
-    if 'createtxg' in properties_list:
-        assert snap['properties']['createtxg']['value'] == snap['createtxg'], f"createtxg property does not match {snap['properties']['createtxg']}"
-
-#
-# Snapshot query: filter by dataset name
-#
-
-def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list,
-                                            expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'],
-                                            unexpected_keys = ['properties']):
-    """
-    Perform snapshot queries, filtered by dataset name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    :expected_keys: a list of strings, the key names expected to be present in the snapshot dict
-    :unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict
-    """
-    with dataset(dataset_name) as dataset_id:
-        with snapshot(dataset_id, "snap01", get=True) as snap01_config:
-            payload = {
-                'query-filters': [['dataset', '=', dataset_id]],
-                'query-options': {
-                    'extra': {
-                        'properties': properties_list
-                    }
-                }
-            }
-            results = GET(f"/zfs/snapshot", payload)
-            assert results.status_code == 200, results.text
-            assert isinstance(results.json(), list), results.text
-            snaps = results.json()
-            # Check that we have one snap returned and that it has the expected
-            # data
-            assert len(snaps) == 1, snaps
-            snap = snaps[0]
-            _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys)
-            _verify_snapshot_against_config(snap, dataset_id, snap01_config)
-            if 'properties' not in unexpected_keys:
-                _verify_snapshot_properties(snap, properties_list)
-
-            # Now create another snapshot and re-issue the query to check the
-            # new results.
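-            # (Editor's note) 'createtxg' is the ZFS transaction-group number
-            # recorded when a snapshot is created; it increases monotonically,
-            # which is why the checks below can sort snapshots by
-            # int(snap['createtxg']) to recover creation order.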
- with snapshot(dataset_id, "snap02", get=True) as snap02_config: - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - # Check that we have two snaps returned and that they have the expected - # data. - assert len(snaps) == 2, snaps - - # Need to sort the snaps by createtxg - ssnaps = sorted(snaps, key=lambda d: int(d['createtxg'])) - snap01 = ssnaps[0] - snap02 = ssnaps[1] - _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap01, dataset_id, snap01_config) - _verify_snapshot_keys_present(snap02, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap02, dataset_id, snap02_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap01, properties_list) - _verify_snapshot_properties(snap02, properties_list) - - existing_snaps = {snap01['createtxg'], snap02['createtxg']} - - # Now create *another* dataset and snapshot and ensure we - # only see the snapshots we're supposed to. - with dataset(f"{dataset_name}2") as dataset2: - with snapshot(dataset2, "snap03", get=True) as snap03_config: - # First issue the original query again & ensure we still have - # the expected snapshots - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 2, snaps - for snap in snaps: - assert snap['createtxg'] in existing_snaps, f"Got unexpected snap: {snap}" - - # Next issue the query with a different filter - payload.update({ - 'query-filters': [['dataset', '=', dataset2]] - }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 1, snaps - snap = snaps[0] - assert snap['createtxg'] not in existing_snaps, f"Got unexpected snap: {snap}" - new_snaps = {snap['createtxg']} - _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap, dataset2, snap03_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap, properties_list) - - # Next issue the query with a bogus filter - payload.update({ - 'query-filters': [['dataset', '=', f"{dataset_name}-BOGUS"]] - }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 0, snaps - - # Next issue the query WITHOUT a filter. It's possible - # that this test could be run while other snapshots are - # present, so take that into account during checks, e.g. - # assert count >= 3 rather than == 3 - payload.update({ - 'query-filters': [] - }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) >= 3, len(snaps) - all_snaps = set([s['createtxg'] for s in snaps]) - assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query" - assert new_snaps.issubset(all_snaps), "New snaps not returned in filterless query" - - # Let the snap03 get cleaned up, and then ensure even with a filterless query - # that it is no longer returned. 
- results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) >= 2, len(snaps) - all_snaps = set([s['createtxg'] for s in snaps]) - assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query" - assert not new_snaps.issubset(all_snaps), "New snaps returned in filterless query" - - -def _test_simple_snapshot_query_filter_dataset(dataset_name, properties_list): - """ - Perform simple snapshot queries, filtered by dataset name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. - :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, - expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'], - unexpected_keys = ['properties']) - - -def _test_full_snapshot_query_filter_dataset(dataset_name, properties_list): - """ - Perform non-simple (non fast-path) snapshot queries, filtered by dataset name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. - :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, - ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'], - []) - - -def test_01_snapshot_query_filter_dataset_props_name(request): - """ - Test snapshot query, filtered by dataset with properties option: 'name' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-name", ['name']) - - -def test_02_snapshot_query_filter_dataset_props_createtxg(request): - """ - Test snapshot query, filtered by dataset with properties option: 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['createtxg']) - - -def test_03_snapshot_query_filter_dataset_props_name_createtxg(request): - """ - Test snapshot query, filtered by dataset with properties option: 'name', 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg']) - _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name']) - - -def test_04_snapshot_query_filter_dataset_props_used(request): - """ - Test snapshot query, filtered by dataset including properties option: 'used' - - The results should be regular (NON fast-path) query that returns 'properties'. - """ - _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used']) - _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'name']) - _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg']) - _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'createtxg']) - - -# -# Snapshot query: filter by snapshot name -# - -def _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, expected_keys, unexpected_keys): - """ - Perform snapshot queries, filtered by snapshot name. 
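-
-    (Editor's note) As with the dataset-filtered variant above: when the
-    requested properties are limited to 'name' and/or 'createtxg', the query
-    is expected to take the fast path and omit the 'properties' key, while
-    any other requested property (e.g. 'used') forces the full query.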
- - :param dataset_name: a string, the name of the dataset to be created and used in queries. - :param properties_list: a list of strings, the names to be queried in snapshot properties option - :expected_keys: a list of strings, the key names expected to be present in the snapshot dict - :unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict - """ - with dataset(dataset_name) as dataset_id: - with snapshot(dataset_id, "snap01", get=True) as snap01_config: - with snapshot(dataset_id, "snap02", get=True) as snap02_config: - # Query snap01 - payload = { - 'query-filters': [['name', '=', snap01_config['name']]], - 'query-options': { - 'extra': { - 'properties': properties_list - } - } - } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - # Check that we have one snap returned and that it has the expected - # data - assert len(snaps) == 1, snaps - snap = snaps[0] - _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap, dataset_id, snap01_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap, properties_list) - - # Query snap02 - payload = { - 'query-filters': [['name', '=', snap02_config['name']]], - 'query-options': { - 'extra': { - 'properties': properties_list - } - } - } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - # Check that we have one snap returned and that it has the expected - # data - assert len(snaps) == 1, snaps - snap = snaps[0] - _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap, dataset_id, snap02_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap, properties_list) - - # Allow snap02 to be destroyed, then query again to make sure we don't get it - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 0, snaps - - -def _test_simple_snapshot_query_filter_snapshot(dataset_name, properties_list): - """ - Perform simple snapshot queries, filtered by snapshot name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. - :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, - expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'], - unexpected_keys = ['properties']) - - -def _test_full_snapshot_query_filter_snapshot(dataset_name, properties_list): - """ - Perform non-simple (non fast-path) snapshot queries, filtered by snapshot name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. 
- :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, - ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'], - []) - - -def test_05_snapshot_query_filter_snapshot_props_name(request): - """ - Test snapshot query, filtered by snapshot with properties option: 'name' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-name", ['name']) - - -def test_06_snapshot_query_filter_snapshot_props_createtxg(request): - """ - Test snapshot query, filtered by snapshot with properties option: 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['createtxg']) - - -def test_07_snapshot_query_filter_snapshot_props_name_createtxg(request): - """ - Test snapshot query, filtered by snapshot with properties option: 'name', 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg']) - _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name']) - - -def test_08_snapshot_query_filter_snapshot_props_used(request): - """ - Test snapshot query, filtered by snapshot including properties option: 'used' - - The results should be regular (NON fast-path) query that returns 'properties'. - """ - _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used']) - _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'name']) - _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg']) - _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'createtxg']) - - -# -# Snapshot query: filter by pool name -# - -def _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, expected_keys, unexpected_keys): - """ - Perform snapshot queries, filtered by pool name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. 
- :param properties_list: a list of strings, the names to be queried in snapshot properties option - :expected_keys: a list of strings, the key names expected to be present in the snapshot dict - :unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict - """ - with dataset(dataset_name) as dataset_id: - # Before we create any snapshots for this test, query snapshots - payload = { - 'query-filters': [['pool', '=', pool_name]], - 'query-options': { - 'extra': { - 'properties': properties_list - } - } - } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - original_snap_count = len(snaps) - - with snapshot(dataset_id, "snap01", get=True) as snap01_config: - with snapshot(dataset_id, "snap02", get=True) as snap02_config: - # Query again - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - - # Check that we have two additional snap returned and that - # they have the expected data - assert len(snaps) == original_snap_count+2, snaps - ssnaps = sorted(snaps, key=lambda d: int(d['createtxg'])) - snap01 = ssnaps[-2] - snap02 = ssnaps[-1] - _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap01, dataset_id, snap01_config) - _verify_snapshot_keys_present(snap02, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap02, dataset_id, snap02_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap01, properties_list) - _verify_snapshot_properties(snap02, properties_list) - - # Allow snap02 to be destroyed & query again. - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - - assert len(snaps) == original_snap_count+1, snaps - ssnaps = sorted(snaps, key=lambda d: int(d['createtxg'])) - snap01 = ssnaps[-1] - _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys) - _verify_snapshot_against_config(snap01, dataset_id, snap01_config) - if 'properties' not in unexpected_keys: - _verify_snapshot_properties(snap01, properties_list) - - -def _test_simple_snapshot_query_filter_pool(dataset_name, properties_list): - """ - Perform simple snapshot queries, filtered by pool name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. - :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, - expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'], - unexpected_keys = ['properties']) - - -def _test_full_snapshot_query_filter_pool(dataset_name, properties_list): - """ - Perform non-simple (non fast-path) snapshot queries, filtered by pool name. - - :param dataset_name: a string, the name of the dataset to be created and used in queries. 
- :param properties_list: a list of strings, the names to be queried in snapshot properties option - """ - _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, - ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'], - []) - - -def test_09_snapshot_query_filter_pool_props_name(request): - """ - Test snapshot query, filtered by pool with properties option: 'name' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-name", ['name']) - - -def test_10_snapshot_query_filter_pool_props_createtxg(request): - """ - Test snapshot query, filtered by pool with properties option: 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['createtxg']) - - -def test_11_snapshot_query_filter_pool_props_name_createtxg(request): - """ - Test snapshot query, filtered by pool with properties option: 'name', 'createtxg' - - The results should be simple (fast-path) without 'properties'. - """ - _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg']) - _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name']) - - -def test_12_snapshot_query_filter_pool_props_used(request): - """ - Test snapshot query, filtered by pool including properties option: 'used' - - The results should be regular (NON fast-path) query that returns 'properties'. - """ - _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used']) - _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'name']) - _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg']) - _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'createtxg']) diff --git a/tests/api2/test_440_snmp.py b/tests/api2/test_440_snmp.py deleted file mode 100644 index eaba458e119dd..0000000000000 --- a/tests/api2/test_440_snmp.py +++ /dev/null @@ -1,445 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - -import os -import pytest - -from time import sleep - -from contextlib import ExitStack -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.pool import dataset, snapshot -from middlewared.test.integration.assets.filesystem import directory, mkfile -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server -from middlewared.test.integration.utils.system import reset_systemd_svcs -from pysnmp.hlapi import (CommunityData, ContextData, ObjectIdentity, - ObjectType, SnmpEngine, UdpTransportTarget, getCmd) - - -from auto_config import ha, interface, password, user, pool_name -from functions import async_SSH_done, async_SSH_start - -skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests") -COMMUNITY = 'public' -TRAPS = False -CONTACT = 'root@localhost.com' -LOCATION = 'Maryville, TN' -PASSWORD = 'testing1234' -SNMP_USER_NAME = 'snmpJoe' -SNMP_USER_AUTH = 'MD5' -SNMP_USER_PWD = "abcd1234" -SNMP_USER_PRIV = 'AES' -SNMP_USER_PHRS = "A priv pass phrase" -SNMP_USER_CONFIG = { - "v3": True, - "v3_username": SNMP_USER_NAME, - "v3_authtype": SNMP_USER_AUTH, - "v3_password": SNMP_USER_PWD, - "v3_privproto": SNMP_USER_PRIV, - "v3_privpassphrase": SNMP_USER_PHRS -} - - 
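-# (Editor's sketch, not part of the original suite.)  The SNMPv3 tests below
-# exercise this user via `snmpwalk` over ssh; the same credentials could
-# also be driven directly through pysnmp, whose hlapi names are already
-# imported above for the v2c sysName query.  The helper below and its use
-# of sysName.0 are the editor's assumptions, not code from the original
-# file.
-from pysnmp.hlapi import (UsmUserData, usmAesCfb128Protocol,
-                          usmHMACMD5AuthProtocol)
-
-
-def v3_get_sysname(hostip):
-    """Query SNMPv2-MIB::sysName.0 authenticating as the SNMPv3 test user."""
-    iterator = getCmd(SnmpEngine(),
-                      # MD5/AES match SNMP_USER_AUTH and SNMP_USER_PRIV above
-                      UsmUserData(SNMP_USER_NAME, SNMP_USER_PWD, SNMP_USER_PHRS,
-                                  authProtocol=usmHMACMD5AuthProtocol,
-                                  privProtocol=usmAesCfb128Protocol),
-                      UdpTransportTarget((hostip, 161)),
-                      ContextData(),
-                      ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0)))
-    errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
-    assert errorIndication is None, errorIndication
-    assert errorStatus == 0, errorStatus
-    return str(varBinds[0])
-
-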
-EXPECTED_DEFAULT_CONFIG = { - "location": "", - "contact": "", - "traps": False, - "v3": False, - "community": "public", - "v3_username": "", - "v3_authtype": "SHA", - "v3_password": "", - "v3_privproto": None, - "v3_privpassphrase": None, - "options": "", - "loglevel": 3, - "zilstat": False -} - -EXPECTED_DEFAULT_STATE = { - "enable": False, - "state": "STOPPED", -} - -CMD_STATE = { - "RUNNING": "start", - "STOPPED": "stop" -} - - -# ===================================================================== -# Fixtures and utilities -# ===================================================================== -@pytest.fixture(scope='module') -def initialize_and_start_snmp(): - """ Initialize and start SNMP """ - try: - # Get initial config and start SNMP - orig_config = call('snmp.config') - call('service.start', 'snmp') - yield orig_config - finally: - # Restore default config (which will also delete any created user), - # stop SNMP and restore default enable state - call('snmp.update', EXPECTED_DEFAULT_CONFIG) - call(f'service.{CMD_STATE[EXPECTED_DEFAULT_STATE["state"]]}', 'snmp') - call('service.update', 'snmp', {"enable": EXPECTED_DEFAULT_STATE['enable']}) - - -@pytest.fixture(scope='class') -def add_SNMPv3_user(): - # Reset the systemd restart counter - reset_systemd_svcs("snmpd snmp-agent") - - call('snmp.update', SNMP_USER_CONFIG) - assert get_systemctl_status('snmp-agent') == "RUNNING" - - res = call('snmp.get_snmp_users') - assert SNMP_USER_NAME in res - yield - - -@pytest.fixture(scope='function') -def create_nested_structure(): - """ - Create the following structure: - tank -+-> dataset_1 -+-> dataset_2 -+-> dataset_3 - |-> zvol_1a |-> zvol-L_2a |-> zvol L_3a - |-> zvol_1b |-> zvol-L_2b |-> zvol L_3b - |-> file_1 |-> file_2 |-> file_3 - |-> dir_1 |-> dir_2 |-> dir_3 - TODO: Make this generic and move to assets - """ - ds_path = "" - ds_list = [] - zv_list = [] - dir_list = [] - file_list = [] - # Test '-' and ' ' in the name (we skip index 0) - zvol_name = ["bogus", "zvol", "zvol-L", "zvol L"] - with ExitStack() as es: - - for i in range(1, 4): - preamble = f"{ds_path + '/' if i > 1 else ''}" - vol_path = f"{preamble}{zvol_name[i]}_{i}" - - # Create zvols - for c in crange('a', 'b'): - zv = es.enter_context(dataset(vol_path + c, {"type": "VOLUME", "volsize": 1048576})) - zv_list.append(zv) - - # Create directories - d = es.enter_context(directory(f"/mnt/{pool_name}/{preamble}dir_{i}")) - dir_list.append(d) - - # Create files - f = es.enter_context(mkfile(f"/mnt/{pool_name}/{preamble}file_{i}", 1048576)) - file_list.append(f) - - # Create datasets - ds_path += f"{'/' if i > 1 else ''}dataset_{i}" - ds = es.enter_context(dataset(ds_path)) - ds_list.append(ds) - - yield {'zv': zv_list, 'ds': ds_list, 'dir': dir_list, 'file': file_list} - - -def crange(c1, c2): - """ - Generates the characters from `c1` to `c2`, inclusive. - Simple lowercase ascii only. 
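-    Example (editor's illustration):
-        >>> list(crange('a', 'c'))
-        ['a', 'b', 'c']
-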
- NOTE: Not safe for runtime code - """ - ord_a = 97 - ord_z = 122 - c1_ord = ord(c1) - c2_ord = ord(c2) - assert c1_ord < c2_ord, f"'{c1}' must be 'less than' '{c2}'" - assert ord_a <= c1_ord <= ord_z - assert ord_a <= c2_ord <= ord_z - for c in range(c1_ord, c2_ord + 1): - yield chr(c) - - -def get_systemctl_status(service): - """ Return 'RUNNING' or 'STOPPED' """ - try: - res = ssh(f'systemctl status {service}') - except AssertionError: - # Return code is non-zero if service is not running - return "STOPPED" - - action = [line for line in res.splitlines() if line.lstrip().startswith('Active')] - return "RUNNING" if action[0].split()[2] == "(running)" else "STOPPED" - - -def get_sysname(hostip, community): - iterator = getCmd(SnmpEngine(), - CommunityData(community), - UdpTransportTarget((hostip, 161)), - ContextData(), - ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0))) - errorIndication, errorStatus, errorIndex, varBinds = next(iterator) - assert errorIndication is None, errorIndication - assert errorStatus == 0, errorStatus - value = str(varBinds[0]) - _prefix = "SNMPv2-MIB::sysName.0 = " - assert value.startswith(_prefix), value - return value[len(_prefix):] - - -def validate_snmp_get_sysname_uses_same_ip(hostip): - """Test that when we query a particular interface by SNMP the response comes from the same IP.""" - - # Write the test in a manner that is portable between Linux and FreeBSD ... which means - # *not* using 'any' as the interface name. We will use the interface supplied by the - # test runner instead. - print(f"Testing {hostip} ", end='') - p = async_SSH_start(f"tcpdump -t -i {interface} -n udp port 161 -c2", user, password, hostip) - # Give some time so that the tcpdump has started before we proceed - sleep(5) - - get_sysname(hostip, COMMUNITY) - - # Now collect and process the tcpdump output - outs, errs = async_SSH_done(p, 20) - output = outs.strip() - assert len(output), f"No output from tcpdump:{outs}" - lines = output.split("\n") - assert len(lines) == 2, f"Unexpected number of lines output by tcpdump: {outs}" - for line in lines: - assert line.split()[0] == 'IP' - # print(errs) - get_dst = lines[0].split()[3].rstrip(':') - reply_src = lines[1].split()[1] - assert get_dst == reply_src - assert get_dst.endswith(".161") - - -def user_list_users(snmp_config): - """Run an snmpwalk as a SNMP v3 user""" - - add_cmd = None - if snmp_config['v3_privproto']: - authpriv_setting = 'authPriv' - add_cmd = f"-x {snmp_config['v3_privproto']} -X \"{snmp_config['v3_privpassphrase']}\" " - else: - authpriv_setting = 'authNoPriv' - - cmd = f"snmpwalk -v3 -u {snmp_config['v3_username']} -l {authpriv_setting} " - cmd += f"-a {snmp_config['v3_authtype']} -A {snmp_config['v3_password']} " - if add_cmd: - cmd += add_cmd - cmd += "localhost iso.3.6.1.6.3.15.1.2.2.1.3" - - # This call will timeout if SNMP is not running - res = ssh(cmd) - return [x.split(':')[-1].strip(' \"') for x in res.splitlines()] - - -def v2c_snmpwalk(mib): - """ - Run snmpwalk with v2c protocol - mib is the item to be gathered. 
mib format examples: - iso.3.6.1.6.3.15.1.2.2.1.3 - 1.3.6.1.4.1.50536.1.2 - """ - cmd = f"snmpwalk -v2c -cpublic localhost {mib}" - - # This call will timeout if SNMP is not running - res = ssh(cmd) - return [x.split(':')[-1].strip(' \"') for x in res.splitlines()] - - -# ===================================================================== -# Tests -# ===================================================================== -class TestSNMP: - - def test_configure_SNMP(self, initialize_and_start_snmp): - config = initialize_and_start_snmp - - # We should be starting with the default config - # Check the hard way so that we can identify the culprit - for k, v in EXPECTED_DEFAULT_CONFIG.items(): - assert config.get(k) == v, f'Expected {k}:"{v}", but found {k}:"{config.get(k)}"' - - # Make some changes that will be checked in a later test - call('snmp.update', { - 'community': COMMUNITY, - 'traps': TRAPS, - 'contact': CONTACT, - 'location': LOCATION - }) - - def test_enable_SNMP_service_at_boot(self): - id = call('service.update', 'snmp', {'enable': True}) - assert isinstance(id, int) - - res = call('service.query', [['service', '=', 'snmp']]) - assert res[0]['enable'] is True - - def test_SNMP_service_is_running(self): - res = call('service.query', [['service', '=', 'snmp']]) - assert res[0]['state'] == 'RUNNING' - - def test_SNMP_settings_are_preserved(self): - data = call('snmp.config') - assert data['community'] == COMMUNITY - assert data['traps'] == TRAPS - assert data['contact'] == CONTACT - assert data['location'] == LOCATION - - def test_sysname_reply_uses_same_ip(self): - validate_snmp_get_sysname_uses_same_ip(truenas_server.ip) - - @skip_ha_tests - def test_ha_sysname_reply_uses_same_ip(self): - validate_snmp_get_sysname_uses_same_ip(truenas_server.ip) - validate_snmp_get_sysname_uses_same_ip(truenas_server.nodea_ip) - validate_snmp_get_sysname_uses_same_ip(truenas_server.nodeb_ip) - - def test_SNMPv3_private_user(self): - """ - The SNMP system user should always be available - """ - # Reset the systemd restart counter - reset_systemd_svcs("snmpd snmp-agent") - - # Make sure the createUser command is not present - res = ssh("tail -2 /var/lib/snmp/snmpd.conf") - assert 'createUser' not in res - - # Make sure the SNMP system user is a rwuser - res = ssh("cat /etc/snmp/snmpd.conf") - assert "rwuser snmpSystemUser" in res - - # List the SNMP users and confirm the system user - # This also confirms the functionality of the system user - res = call('snmp.get_snmp_users') - assert "snmpSystemUser" in res - - @pytest.mark.parametrize('payload,attrib,errmsg', [ - ({'v3': False, 'community': ''}, - 'snmp_update.community', 'This field is required when SNMPv3 is disabled'), - ({'v3': True}, - 'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'), - ({'v3_authtype': 'AES'}, - 'snmp_update.v3_authtype', 'Invalid choice: AES'), - ({'v3': True, 'v3_authtype': 'MD5'}, - 'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'), - ({'v3_password': 'short'}, - 'snmp_update.v3_password', 'Password must contain at least 8 characters'), - ({'v3_privproto': 'SHA'}, - 'snmp_update.v3_privproto', 'Invalid choice: SHA'), - ({'v3_privproto': 'AES'}, - 'snmp_update.v3_privpassphrase', 'This field is required when SNMPv3 private protocol is specified'), - ]) - def test_v3_validators(self, payload, attrib, errmsg): - """ - All these configuration updates should fail. 
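-        (Editor's note) Each payload should be rejected by the snmp.update
-        validators; the asserts below match only the first reported error's
-        attribute and errmsg.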
- """ - with pytest.raises(ValidationErrors) as ve: - call('snmp.update', payload) - if attrib: - assert f"{attrib}" in ve.value.errors[0].attribute - if errmsg: - assert f"{errmsg}" in ve.value.errors[0].errmsg - - @pytest.mark.usefixtures("add_SNMPv3_user") - class TestSNMPv3User: - def test_SNMPv3_user_function(self): - res = user_list_users(SNMP_USER_CONFIG) - assert SNMP_USER_NAME in res, f"Expected to find {SNMP_USER_NAME} in {res}" - - def test_SNMPv3_user_retained_across_service_restart(self): - # Reset the systemd restart counter - reset_systemd_svcs("snmpd snmp-agent") - - res = call('service.stop', 'snmp') - assert res is False - res = call('service.start', 'snmp') - assert res is True - res = call('snmp.get_snmp_users') - assert "snmpSystemUser" in res - assert SNMP_USER_NAME in res - - def test_SNMPv3_user_retained_across_v3_disable(self): - - # Disable and check - res = call('snmp.update', {'v3': False}) - assert SNMP_USER_NAME in res['v3_username'] - res = call('snmp.get_snmp_users') - assert SNMP_USER_NAME in res - - # Enable and check - res = call('snmp.update', {'v3': True}) - assert SNMP_USER_NAME in res['v3_username'] - res = call('snmp.get_snmp_users') - assert SNMP_USER_NAME in res - - @pytest.mark.parametrize('key,value', [ - ('reset', ''), # Reset systemd counters - ('v3_username', 'ixUser'), - ('v3_authtype', 'SHA'), - ('v3_password', 'SimplePassword'), - ('reset', ''), # Reset systemd counters - ('v3_privproto', 'DES'), - ('v3_privpassphrase', 'Pass phrase with spaces'), - # Restore original user name - ('v3_username', SNMP_USER_NAME) - ]) - def test_SNMPv3_user_changes(self, key, value): - """ - Make changes to the SNMPv3 user name, password, etc. and confirm user function. - This also tests a pass phrase that includes spaces. - NOTE: We include systemd counter resets because these calls require the most restarts. - """ - if key == 'reset': - # Reset the systemd restart counter - reset_systemd_svcs("snmpd snmp-agent") - else: - res = call('snmp.update', {key: value}) - assert value in res[key] - assert get_systemctl_status('snmp-agent') == "RUNNING" - - # Confirm user function after change - user_config = call('snmp.config') - res = user_list_users(user_config) - assert user_config['v3_username'] in res - - def test_SNMPv3_user_delete(self): - - # Make sure the user is currently present - res = call('snmp.get_snmp_users') - assert SNMP_USER_NAME in res - - res = call('snmp.update', {'v3': False, 'v3_username': ''}) - # v3_authtype is defaulted to 'SHA' in the DB - assert not any([res['v3'], res['v3_username'], res['v3_password'], - res['v3_privproto'], res['v3_privpassphrase']]) and 'SHA' in res['v3_authtype'] - assert get_systemctl_status('snmp-agent') == "RUNNING" - - res = call('snmp.get_snmp_users') - assert SNMP_USER_NAME not in res - - # Make sure the user cannot perform SNMP requests - with pytest.raises(Exception) as ve: - res = user_list_users(SNMP_USER_CONFIG) - assert "Unknown user name" in str(ve.value) - - def test_zvol_reporting(self, create_nested_structure): - """ - The TrueNAS snmp agent should list all zvols. - TrueNAS zvols can be created on any ZFS pool or dataset. - The snmp agent should list them all. - snmpwalk -v2c -cpublic localhost 1.3.6.1.4.1.50536.1.2.1.1.2 - """ - # The expectation is that the snmp agent should list exactly the six zvols. 
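-        # (Editor's note) 1.3.6.1.4.1.50536 is the iXsystems private
-        # enterprise subtree used by the TrueNAS MIB; .1.2.1.1.2 beneath it
-        # lists zvol names.  create_nested_structure builds two zvols at each
-        # of three dataset levels, giving the six expected entries; the
-        # snapshot taken below must not appear among them.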
- created_items = create_nested_structure - - # Include a snapshot of one of the zvols - with snapshot(created_items['zv'][0], "snmpsnap01"): - snmp_res = v2c_snmpwalk('1.3.6.1.4.1.50536.1.2.1.1.2') - assert all(v in created_items['zv'] for v in snmp_res), f"expected {created_items['zv']}, but found {snmp_res}" diff --git a/tests/api2/test_450_staticroutes.py b/tests/api2/test_450_staticroutes.py deleted file mode 100644 index 7164a5432ed0b..0000000000000 --- a/tests/api2/test_450_staticroutes.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - - -import os -import pytest -import sys -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST -from auto_config import user, password -DESTINATION = '127.1.1.1' -GATEWAY = '127.0.0.1' - - -@pytest.fixture(scope='module') -def sr_dict(): - return {} - - -def test_01_creating_staticroute(sr_dict): - results = POST('/staticroute/', { - 'destination': DESTINATION, - 'gateway': GATEWAY, - 'description': 'test route', - }) - assert results.status_code == 200, results.text - sr_dict['newroute'] = results.json() - - -def test_02_check_staticroute_configured_using_api(sr_dict): - results = GET(f'/staticroute/?id={sr_dict["newroute"]["id"]}') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - assert len(data) == 1, data - assert DESTINATION in data[0]['destination'], data - assert data[0]['gateway'] == GATEWAY, data - - -def test_03_checking_staticroute_configured_using_ssh(request): - results = SSH_TEST(f'netstat -4rn|grep -E ^{DESTINATION}', user, password) - assert results['result'] is True, results - assert results['stdout'].strip().split()[1] == GATEWAY, results - - -def test_04_delete_staticroute(sr_dict): - results = DELETE(f'/staticroute/id/{sr_dict["newroute"]["id"]}/') - assert results.status_code == 200, results.text - - -def test_05_check_staticroute_unconfigured_using_api(sr_dict): - results = GET(f'/staticroute/?destination={DESTINATION}') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - assert len(data) == 0, data - - -def test_06_checking_staticroute_unconfigured_using_ssh(request): - results = SSH_TEST(f'netstat -4rn|grep -E ^{DESTINATION}', user, password) - assert results['result'] is False, results diff --git a/tests/api2/test_470_system.py b/tests/api2/test_470_system.py deleted file mode 100644 index c88fef6c465eb..0000000000000 --- a/tests/api2/test_470_system.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD -# Location for tests into REST API of FreeNAS - -import os -import sys - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET -from middlewared.test.integration.utils import call - - -def test_01_check_if_system_is_ready_to_use(): - results = GET("/system/ready/") - assert results.json() is True, results.text - - -def test_02_checking_system_version(): - results = GET("/system/version/") - assert results.status_code == 200, results.text - assert type(results.json()) == str, results.text - - -def test_03_check_system_version_match_with_system_info(): - system_version = GET("/system/info/").json()['version'] - system_info_version = GET("/system/version/").json() - assert system_version == system_info_version - - -def test_04_check_system_product_type(): - results = GET("/system/product_type/") - assert 
results.status_code == 200, results.text - result = results.json() - assert isinstance(result, str), results.text - assert result in ('SCALE', 'SCALE_ENTERPRISE'), results.text - - -def test_05_check_system_debug(): - results = GET("/system/debug/") - assert results.status_code == 200, results.text - - -def test_06_check_system_set_time(): - """ - This test intentionally slews our clock to be off - by 300 seconds and then verifies that it got set - """ - results = GET("/system/info/") - assert results.status_code == 200, results.text - - # Convert to seconds - datetime = results.json()['datetime']['$date'] / 1000 - - # hop 300 seconds into the past - target = datetime - 300 - call('system.set_time', int(target)) - - results = GET("/system/info/") - assert results.status_code == 200, results.text - datetime2 = results.json()['datetime']['$date'] / 1000 - - # This is a fudge-factor because NTP will start working - # pretty quickly to correct the slew. - assert abs(target - datetime2) < 60 diff --git a/tests/api2/test_475_syslog.py b/tests/api2/test_475_syslog.py deleted file mode 100644 index 57b5079287cdb..0000000000000 --- a/tests/api2/test_475_syslog.py +++ /dev/null @@ -1,97 +0,0 @@ -from time import sleep - -import pytest -from auto_config import password, user -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server - - - -def do_syslog(ident, message, facility='syslog.LOG_USER', priority='syslog.LOG_INFO'): - """ - This generates a syslog message on the TrueNAS server we're currently testing. - We don't need to override IP addr or creds because we are not a syslog target. - """ - cmd = 'python3 -c "import syslog;' - cmd += f'syslog.openlog(ident=\\\"{ident}\\\", facility={facility});' - cmd += f'syslog.syslog({priority},\\\"{message}\\\");syslog.closelog()"' - ssh(cmd) - - -def check_syslog(log_path, message, target_ip=None, target_user=user, target_passwd=password, timeout=30): - """ - Common function to check whether a particular message exists in a log file. - This will be used to check local and remote syslog servers. - - Current implementation performs simple grep through the log file, and so - onus is on test developer to not under-specify `message` in order to avoid - false positives. 
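-
-    (Editor's note) Returns the grep output on a match; if `timeout` elapses
-    without one, the loop below falls through and the function returns None.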
- """ - target_ip = target_ip or truenas_server.ip - sleep_time = 1 - while timeout > 0: - found = ssh( - f'grep -R "{message}" {log_path}', - check=False, - user=target_user, - password=target_passwd, - ip=target_ip - ) - if not found: - sleep(sleep_time) - timeout -= sleep_time - else: - return found - - -@pytest.mark.parametrize('params', [ - { - 'ident': 'iscsi-scstd', - 'msg': 'ZZZZ: random scst test', - 'path': '/var/log/scst.log', - }, - { - 'ident': 'iscsi-scstd', - 'msg': 'ZZZZ: random scst test', - 'path': '/var/log/scst.log', # This is just to make sure our exclude filter works as intended - }, -]) -def test_local_syslog_filter(request, params): - """ - This test validates that our syslog-ng filters are correctly placing - messages into their respective paths in /var/log - """ - do_syslog( - params['ident'], - params['msg'], - params.get('facility', 'syslog.LOG_USER'), - params.get('priority', 'syslog.LOG_INFO') - ) - assert check_syslog(params['path'], params['msg'], timeout=10) - - -@pytest.mark.parametrize('log_path', [ - '/var/log/messages', - '/var/log/syslog', - '/var/log/daemon.log' -]) -def test_filter_leak(request, log_path): - """ - This test validates that our exclude filter works properly and that - particularly spammy applications aren't polluting useful logs. - """ - results = ssh(f'grep -R "ZZZZ:" {log_path}', complete_response=True, check=False) - assert results['result'] is False, str(results['result']) - - -def test_07_check_can_set_remote_syslog(request): - """ - Basic test to validate that setting a remote syslog target - doesn't break syslog-ng config - """ - try: - data = call('system.advanced.update', {'syslogserver': '127.0.0.1'}) - assert data['syslogserver'] == '127.0.0.1' - call('service.restart', 'syslogd', {'silent': False}) - finally: - call('system.advanced.update', {'syslogserver': ''}) diff --git a/tests/api2/test_490_system_general.py b/tests/api2/test_490_system_general.py deleted file mode 100644 index 7b0f391702df0..0000000000000 --- a/tests/api2/test_490_system_general.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - -import pytest - -from middlewared.test.integration.utils import call, ssh - -import sys -import os -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import PUT, GET, SSH_TEST -from auto_config import user, password -TIMEZONE = "America/New_York" - - -def test_01_get_system_general(): - results = GET("/system/general/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict) - - -def test_02_get_system_general_language_choices(): - results = GET("/system/general/language_choices/") - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, dict), data - - -def test_03_get_system_general_timezone_choices(): - results = GET("/system/general/timezone_choices/") - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, dict), data - assert TIMEZONE in data - - -def test_04_get_system_general_country_choices(): - results = GET("/system/general/country_choices/") - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, dict), data - - -def test_05_get_system_general_kbdmap_choices(): - results = GET("/system/general/kbdmap_choices/") - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, dict), data - - -def test_06_Setting_timezone(): - results = 
PUT("/system/general/", {"timezone": TIMEZONE}) - assert results.status_code == 200, results.text - - -def test_07_Checking_timezone_using_api(): - results = GET("/system/general/") - assert results.status_code == 200, results.text - data = results.json() - assert data['timezone'] == TIMEZONE - - -def test_08_Checking_timezone_using_ssh(request): - results = SSH_TEST(f'diff /etc/localtime /usr/share/zoneinfo/{TIMEZONE}', - user, password) - assert results['result'] is True, results diff --git a/tests/api2/test_500_system_ntpservers.py b/tests/api2/test_500_system_ntpservers.py deleted file mode 100644 index a34ce1fd1b882..0000000000000 --- a/tests/api2/test_500_system_ntpservers.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - -import os -import sys -import time - -import pytest -from pytest_dependency import depends - -apifolder = os.getcwd() -sys.path.append(apifolder) -from auto_config import badNtpServer, password, user -from functions import DELETE, GET, POST, PUT, SSH_TEST - -CONFIG_FILE = '/etc/chrony/chrony.conf' -from middlewared.test.integration.utils import call - - -class TestBadNtpServer: - - @pytest.fixture(scope='class') - def ntp_dict(self): - # read the current config to restore when done - # we will remove all but the lowest id item - results = GET('/system/ntpserver') - assert results.status_code == 200, results.text - - orig_ntp_servers = results.json() - lowest_id = min([ntp['id'] for ntp in orig_ntp_servers]) - try: - yield {'lowest_id': lowest_id} - finally: - for ntp in orig_ntp_servers: - ident = ntp['id'] - del ntp['id'] - ntp['force'] = True - - if ident == lowest_id: - result = PUT(f'/system/ntpserver/id/{ident}/', ntp) - else: - result = POST('/system/ntpserver', ntp) - assert result.status_code == 200, result.text - - def test_01_Changing_options_in_ntpserver(self, ntp_dict): - ident = ntp_dict['lowest_id'] - results = PUT(f'/system/ntpserver/id/{ident}/', { - 'address': badNtpServer, - 'burst': True, - 'iburst': True, - 'maxpoll': 10, - 'minpoll': 6, - 'prefer': True, - 'force': True}) - assert results.status_code == 200, results.text - - def test_02_Check_ntpserver_configured_using_api(self, ntp_dict): - ident = ntp_dict['lowest_id'] - results = GET(f'/system/ntpserver/?id={ident}') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - assert len(data) == 1, data - assert data[0]['address'] == badNtpServer, data - - def test_03_Checking_ntpserver_configured_using_ssh(self, request): - cmd = f'fgrep "{badNtpServer}" {CONFIG_FILE}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results - - def test_04_Check_ntpservers(self, ntp_dict): - results = GET('/system/ntpserver/') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - ntp_dict['servers'] = {i['id']: i for i in data} - - def test_05_Removing_non_AD_NTP_servers(self, ntp_dict): - ident = ntp_dict['lowest_id'] - if len(ntp_dict['servers']) == 1: - pytest.skip('Only one NTP server found') - for k in list(ntp_dict['servers'].keys()): - if k == ident: - continue - results = DELETE(f'/system/ntpserver/id/{k}/') - assert results.status_code == 200, results.text - ntp_dict['servers'].pop(k) - - def test_06_Checking_ntpservers_num_configured_using_ssh(self, ntp_dict, request): - results = SSH_TEST(f'grep -R ^server {CONFIG_FILE}', user, password) - assert results['result'] is True, results - assert 
len(results['stdout'].strip().split('\n')) == \ - len(ntp_dict['servers']), results['output'] - - def test_07_check_alert_set(self, ntp_dict): - # Run the NTPHealthCheckAlertClass and ensure it has an alert - alerts = call('alert.run_source', 'NTPHealthCheck') - assert len(alerts) == 1, alerts - assert alerts[0]['args']['reason'].startswith("No Active NTP peers"), alerts - - -def test_08_check_alert_clear(): - # Now that the original NTP servers have been restored, check the alerts are gone - # Give some retries to allow the daemon sync with the sources - retries = 10 - while retries > 0: - retries -= 1 - alerts = call('alert.run_source', 'NTPHealthCheck') - if len(alerts) == 0: - break - time.sleep(2) - assert len(alerts) == 0, alerts diff --git a/tests/api2/test_530_ups.py b/tests/api2/test_530_ups.py deleted file mode 100644 index 5175a14d7156e..0000000000000 --- a/tests/api2/test_530_ups.py +++ /dev/null @@ -1,236 +0,0 @@ -import os -from tempfile import NamedTemporaryFile -from time import sleep - -import pytest - -from assets.websocket.service import ensure_service_enabled, ensure_service_started -from auto_config import password, user -from functions import send_file - -from middlewared.test.integration.utils import call, mock, ssh -from middlewared.test.integration.utils.client import truenas_server - -DUMMY_FAKEDATA_DEV = '/tmp/fakedata.dev' -SHUTDOWN_MARKER_FILE = '/tmp/is_shutdown' - -first_ups_payload = { - 'rmonitor': True, - 'mode': 'MASTER', - 'shutdown': 'BATT', - 'port': '655', - 'remotehost': '127.0.0.1', - 'identifier': 'ups', - 'driver': 'usbhid-ups$PROTECT NAS', - 'monpwd': 'mypassword' -} - -second_ups_payload = { - 'rmonitor': False, - 'mode': 'SLAVE', - 'shutdown': 'LOWBATT', - 'port': '65535', - 'identifier': 'foo', - 'monpwd': 'secondpassword' -} - -default_dummy_data = { - 'battery.charge': 100, - 'driver.parameter.pollinterval': 2, - 'input.frequency': 49.9, - 'input.frequency.nominal': 50.0, - 'input.voltage': 230, - 'input.voltage.nominal': 240, - 'ups.status': 'OL', - 'ups.timer.shutdown': -1, - 'ups.timer.start': -1, -} - - -def get_service_state(): - return call('service.query', [['service', '=', 'ups']], {'get': True}) - - -def remove_file(filepath): - ssh(f'rm {filepath}', check=False) - - -def did_shutdown(): - return ssh(f'cat {SHUTDOWN_MARKER_FILE}', check=False) == "done\n" - - -def write_fake_data(data=None): - data = data or {} - all_data = default_dummy_data | data - with NamedTemporaryFile() as f: - for k, v in all_data.items(): - f.write(f'{k}: {v}\n'.encode('utf-8')) - f.flush() - os.fchmod(f.fileno(), 0o644) - results = send_file(f.name, DUMMY_FAKEDATA_DEV, user, password, truenas_server.ip) - assert results['result'], str(results['output']) - - -def wait_for_alert(klass, retries=10): - assert retries > 0 - while retries: - alerts = call('alert.list') - for alert in alerts: - if alert['klass'] == klass: - return alert - sleep(1) - retries -= 1 - - -@pytest.fixture(scope='module') -def ups_running(): - with ensure_service_enabled('ups'): - with ensure_service_started('ups'): - yield - - -@pytest.fixture(scope='module') -def dummy_ups_driver_configured(): - write_fake_data() - remove_file(SHUTDOWN_MARKER_FILE) - old_config = call('ups.config') - del old_config['complete_identifier'] - del old_config['id'] - payload = { - 'mode': 'MASTER', - 'driver': 'dummy-ups', - 'port': DUMMY_FAKEDATA_DEV, - 'description': 'dummy-ups in dummy-once mode', - 'shutdowncmd': f'echo done > {SHUTDOWN_MARKER_FILE}' - } - with mock('ups.driver_choices', 
return_value={'dummy-ups': 'Driver for multi-purpose UPS emulation', - 'usbhid-ups$PROTECT NAS': 'AEG Power Solutions ups 3 PROTECT NAS (usbhid-ups)'}): - call('ups.update', payload) - try: - yield - finally: - call('ups.update', old_config) - remove_file(SHUTDOWN_MARKER_FILE) - remove_file(DUMMY_FAKEDATA_DEV) - - -def test__enable_ups_service(): - results = get_service_state() - assert results['state'] == 'STOPPED', results - assert results['enable'] is False, results - call('service.update', 'ups', {'enable': True}) - results = get_service_state() - assert results['enable'] is True, results - - -def test__set_ups_options(): - results = call('ups.update', first_ups_payload) - for data in first_ups_payload.keys(): - assert first_ups_payload[data] == results[data], results - - -def test__start_ups_service(): - call('service.start', 'ups') - results = get_service_state() - assert results['state'] == 'RUNNING', results - - -def test__get_reports_configuration_as_saved(): - results = call('ups.config') - for data in first_ups_payload.keys(): - assert first_ups_payload[data] == results[data], results - - -def test__change_ups_options_while_service_is_running(): - payload = { - 'port': '65545', - 'identifier': 'boo' - } - results = call('ups.update', payload) - for data in ['port', 'identifier']: - assert payload[data] == results[data], results - results = call('ups.config') - for data in ['port', 'identifier']: - assert payload[data] == results[data], results - - -def test__stop_ups_service(): - results = get_service_state() - assert results['state'] == 'RUNNING', results - call('service.stop', 'ups') - results = get_service_state() - assert results['state'] == 'STOPPED', results - - -def test__change_ups_options(): - results = call('ups.update', second_ups_payload) - for data in second_ups_payload.keys(): - assert second_ups_payload[data] == results[data], results - call('service.start', 'ups') - results = get_service_state() - assert results['state'] == 'RUNNING', results - results = call('ups.config') - for data in second_ups_payload.keys(): - assert second_ups_payload[data] == results[data], results - - -def test__get_ups_driver_choice(): - results = call('ups.driver_choices') - assert isinstance(results, dict) is True, results - - -def test__get_ups_port_choice(): - results = call('ups.port_choices') - assert isinstance(results, list) is True, results - assert isinstance(results[0], str) is True, results - - -def test__disable_and_stop_ups_service(): - call('service.update', 'ups', {'enable': False}) - results = get_service_state() - assert results['enable'] is False, results - call('service.stop', 'ups') - results = get_service_state() - assert results['state'] == 'STOPPED', results - - -def test__ups_online_to_online_lowbattery(ups_running, dummy_ups_driver_configured): - results = get_service_state() - assert results['state'] == 'RUNNING', results - sleep(2) - assert 'UPSBatteryLow' not in [alert['klass'] for alert in call('alert.list')] - write_fake_data({'battery.charge': 20, 'ups.status': 'OL LB'}) - alert = wait_for_alert('UPSBatteryLow') - assert alert - assert 'battery.charge: 20' in alert['formatted'], alert - assert not did_shutdown() - - -def test__ups_online_to_onbatt(ups_running, dummy_ups_driver_configured): - assert 'UPSOnBattery' not in [alert['klass'] for alert in call('alert.list')] - write_fake_data({'battery.charge': 40, 'ups.status': 'OB'}) - alert = wait_for_alert('UPSOnBattery') - assert alert - assert 'battery.charge: 40' in alert['formatted'], alert - assert not 
did_shutdown() - - -def test__ups_onbatt_to_online(ups_running, dummy_ups_driver_configured): - assert 'UPSOnline' not in [alert['klass'] for alert in call('alert.list')] - write_fake_data({'battery.charge': 100, 'ups.status': 'OL'}) - alert = wait_for_alert('UPSOnline') - assert alert - assert 'battery.charge: 100' in alert['formatted'], alert - assert not did_shutdown() - - -def test__ups_online_to_onbatt_lowbattery(ups_running, dummy_ups_driver_configured): - assert 'UPSOnBattery' not in [alert['klass'] for alert in call('alert.list')] - write_fake_data({'battery.charge': 10, 'ups.status': 'OB LB'}) - alert = wait_for_alert('UPSOnBattery') - assert alert - assert 'battery.charge: 10' in alert['formatted'], alert - alert = wait_for_alert('UPSBatteryLow') - assert alert - assert 'battery.charge: 10' in alert['formatted'], alert - assert did_shutdown() diff --git a/tests/api2/test_541_vm.py b/tests/api2/test_541_vm.py deleted file mode 100644 index 04f39bad9e01d..0000000000000 --- a/tests/api2/test_541_vm.py +++ /dev/null @@ -1,268 +0,0 @@ -import dataclasses -import time - -import pytest -from pytest_dependency import depends - -from auto_config import pool_name -from middlewared.test.integration.utils import call -from middlewared.test.integration.assets.pool import dataset - - -@dataclasses.dataclass -class VmAssets: - # probably best to keep this module - # to only creating 1 VM, since the - # functionality can be tested on 1 - # and creating > 1 nested VMs incurs - # an ever-increasing perf penalty in - # test infrastructure - VM_NAMES = ['vm01'] - VM_INFO = dict() - VM_DEVICES = dict() - - -@pytest.mark.dependency(name='VIRT_SUPPORTED') -def test_001_is_virtualization_supported(): - if not call('vm.virtualization_details')['supported']: - pytest.skip('Virtualization not supported') - elif call('failover.licensed'): - pytest.skip('Virtualization not supported on HA') - - -@pytest.mark.parametrize( - 'info', - [ - {'method': 'vm.flags', 'type': dict, 'keys': ('intel_vmx', 'amd_rvi')}, - {'method': 'vm.cpu_model_choices', 'type': dict, 'keys': ('EPYC',)}, - {'method': 'vm.bootloader_options', 'type': dict, 'keys': ('UEFI', 'UEFI_CSM')}, - {'method': 'vm.get_available_memory', 'type': int}, - {'method': 'vm.guest_architecture_and_machine_choices', 'type': dict, 'keys': ('i686', 'x86_64')}, - {'method': 'vm.maximum_supported_vcpus', 'type': int}, - {'method': 'vm.port_wizard', 'type': dict, 'keys': ('port', 'web')}, - {'method': 'vm.random_mac', 'type': str}, - {'method': 'vm.resolution_choices', 'type': dict, 'keys': ('1920x1200', '640x480')}, - {'method': 'vm.device.bind_choices', 'type': dict, 'keys': ('0.0.0.0', '::')}, - {'method': 'vm.device.iommu_enabled', 'type': bool}, - {'method': 'vm.device.iotype_choices', 'type': dict, 'keys': ('NATIVE',)}, - {'method': 'vm.device.nic_attach_choices', 'type': dict}, - {'method': 'vm.device.usb_controller_choices', 'type': dict, 'keys': ('qemu-xhci',)}, - {'method': 'vm.device.usb_passthrough_choices', 'type': dict}, - {'method': 'vm.device.passthrough_device_choices', 'type': dict}, - {'method': 'vm.device.pptdev_choices', 'type': dict} - ], - ids=lambda x: x['method'] -) -def test_002_vm_endpoint(info, request): - """ - Very basic behavior of various VM endpoints. Ensures they - return without error and that the type of response is what - we expect. 
If a dict is returned, we check that top-level - keys exist - """ - depends(request, ['VIRT_SUPPORTED']) - rv = call(info['method']) - assert isinstance(rv, info['type']) - if (keys := info.get('keys')): - assert all((i in rv for i in keys)) - - -@pytest.mark.parametrize('disk_name', ['test zvol']) -def test_003_verify_disk_choice(disk_name): - with dataset(disk_name, {'type': 'VOLUME', 'volsize': 1048576, 'sparse': True}) as ds: - assert call('vm.device.disk_choices').get(f'/dev/zvol/{ds.replace(" ", "+")}') == ds - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_CREATED') -def test_010_create_vm(vm_name, request): - depends(request, ['VIRT_SUPPORTED']) - vm_payload = { - 'name': vm_name, - 'description': f'{vm_name} description', - 'vcpus': 1, - 'memory': 512, - 'bootloader': 'UEFI', - 'autostart': False, - } - vm = call('vm.create', vm_payload) - qry = call('vm.query', [['id', '=', vm['id']]], {'get': True}) - assert all((vm_payload[key] == qry[key] for key in vm_payload)) - VmAssets.VM_INFO.update({qry['name']: {'query_response': qry}}) - - -@pytest.mark.parametrize('device', ['DISK', 'DISPLAY', 'NIC']) -@pytest.mark.dependency(name='ADD_DEVICES_TO_VM') -def test_011_add_devices_to_vm(device, request): - depends(request, ['VM_CREATED']) - for vm_name, info in VmAssets.VM_INFO.items(): - if vm_name not in VmAssets.VM_DEVICES: - VmAssets.VM_DEVICES[vm_name] = dict() - - dev_info = { - 'dtype': device, - 'vm': info['query_response']['id'], - } - if device == 'DISK': - zvol_name = f'{pool_name}/{device}_for_{vm_name}' - dev_info.update({ - 'attributes': { - 'create_zvol': True, - 'zvol_name': zvol_name, - 'zvol_volsize': 1048576 - } - }) - elif device == 'DISPLAY': - dev_info.update({'attributes': {'resolution': '1024x768', 'password': 'displaypw'}}) - elif device == 'NIC': - for nic_name in call('vm.device.nic_attach_choices'): - dev_info.update({'attributes': {'nic_attach': nic_name}}) - break - else: - assert False, f'Unhandled device type: ({device!r})' - - info = call('vm.device.create', dev_info) - VmAssets.VM_DEVICES[vm_name].update({device: info}) - # only adding these devices to 1 VM - break - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -def test_012_verify_devices_for_vm(vm_name, request): - depends(request, ['ADD_DEVICES_TO_VM']) - for device, info in VmAssets.VM_DEVICES[vm_name].items(): - qry = call('vm.device.query', [['id', '=', info['id']]], {'get': True}) - assert qry['dtype'] == device - assert qry['vm'] == VmAssets.VM_INFO[vm_name]['query_response']['id'] - assert qry['attributes'] == VmAssets.VM_DEVICES[vm_name][device]['attributes'] - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -def test_013_delete_vm_devices(vm_name, request): - depends(request, ['ADD_DEVICES_TO_VM']) - for device, info in VmAssets.VM_DEVICES[vm_name].items(): - opts = {} - if device == 'DISK': - opts = {'zvol': True} - - call('vm.device.delete', info['id'], opts) - assert not call('vm.device.query', [['id', '=', info['id']]]) - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_STARTED') -def test_014_start_vm(vm_name, request): - depends(request, ['VM_CREATED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - call('vm.start', _id) - vm_status = call('vm.status', _id) - assert all((vm_status[key] == 'RUNNING' for key in ('state', 'domain_state'))) - assert all((vm_status['pid'], isinstance(vm_status['pid'], int))) - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -def 
test_015_query_vm_info(vm_name, request): - depends(request, ['VIRT_SUPPORTED', 'VM_CREATED', 'VM_STARTED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - vm_string = f'{_id}_{vm_name}' - assert call('vm.get_console', _id) == vm_string - assert vm_string in call('vm.log_file_path', _id) - - mem_keys = ('RNP', 'PRD', 'RPRD') - mem_info = call('vm.get_vmemory_in_use') - assert isinstance(mem_info, dict) - assert all((key in mem_info for key in mem_keys)) - assert all((isinstance(mem_info[key], int) for key in mem_keys)) - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_SUSPENDED') -def test_020_suspend_vm(vm_name, request): - depends(request, ['VIRT_SUPPORTED', 'VM_CREATED', 'VM_STARTED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - call('vm.suspend', _id) - for retry in range(1, 4): - status = call('vm.status', _id) - if all((status['state'] == 'SUSPENDED', status['domain_state'] == 'PAUSED')): - break - else: - time.sleep(1) - else: - assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to suspend' - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_RESUMED') -def test_021_resume_vm(vm_name, request): - depends(request, ['VM_SUSPENDED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - call('vm.resume', _id) - for retry in range(1, 4): - status = call('vm.status', _id) - if all((status['state'] == 'RUNNING', status['domain_state'] == 'RUNNING')): - break - else: - time.sleep(1) - else: - assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to resume' - -@pytest.mark.skip(reason='Takes > 60 seconds and is flaky') -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_RESTARTED') -def test_022_restart_vm(vm_name, request): - depends(request, ['VM_RESUMED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - call('vm.restart', _id, job=True) - status = call('vm.status', _id) - assert all((status['state'] == 'RUNNING', status['domain_state'] == 'RUNNING')) - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_POWERED_OFF') -def test_023_poweroff_vm(vm_name, request): - depends(request, ['VM_RESUMED']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - call('vm.poweroff', _id) - for retry in range(1, 4): - status = call('vm.status', _id) - if all((status['state'] == 'STOPPED', status['domain_state'] == 'SHUTOFF')): - break - else: - time.sleep(1) - else: - assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to poweroff' - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -@pytest.mark.dependency(name='VM_UPDATED') -def test_024_update_powered_off_vm(vm_name, request): - depends(request, ['VM_POWERED_OFF']) - _id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - new_mem = 768 - call('vm.update', _id, {'memory': new_mem}) - assert call('vm.query', [['id', '=', _id]], {'get': True})['memory'] == new_mem - - -@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES) -def test_024_clone_powered_off_vm(vm_name, request): - depends(request, ['VM_POWERED_OFF']) - to_clone_id = VmAssets.VM_INFO[vm_name]['query_response']['id'] - new_name = f'{vm_name}_clone' - call('vm.clone', to_clone_id, new_name) - qry = call('vm.query', [['name', '=', new_name]], {'get': True}) - VmAssets.VM_INFO.update({new_name: {'query_response': qry}}) - assert call('vm.get_console', qry['id']) == f'{qry["id"]}_{new_name}' - - 
-    VmAssets.VM_DEVICES.update({new_name: dict()})
-    for dev in call('vm.device.query', [['vm', '=', qry['id']]]):
-        if dev['dtype'] in ('DISK', 'NIC', 'DEVICE'):
-            # add this to VM_DEVICES so we properly clean up after
-            # the test module runs
-            VmAssets.VM_DEVICES[new_name].update({dev['dtype']: dev})
-
-
-def test_025_cleanup_vms(request):
-    depends(request, ['VM_POWERED_OFF'])
-    for vm in call('vm.query'):
-        call('vm.delete', vm['id'])
-        assert not call('vm.query', [['name', '=', vm['id']]])
diff --git a/tests/api2/test_550_vmware.py b/tests/api2/test_550_vmware.py
deleted file mode 100644
index b579ecc971e80..0000000000000
--- a/tests/api2/test_550_vmware.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-
-# Author: Eric Turgeon
-# License: BSD
-
-import pytest
-import sys
-import os
-from pytest_dependency import depends
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import GET, POST, SSH_TEST
-from auto_config import user, password
-
-try:
-    Reason = 'VMWARE credentials are missing'
-    from config import VMWARE_HOST, VMWARE_USERNAME, VMWARE_PASSWORD
-    vmw_credentials = True
-except ImportError:
-    vmw_credentials = False
-
-
-def test_01_get_vmware_query():
-    results = GET('/vmware/')
-    assert results.status_code == 200
-    assert isinstance(results.json(), list) is True
-
-
-if vmw_credentials:
-    def test_02_create_vmware():
-        payload = {
-            'hostname': VMWARE_HOST,
-            'username': VMWARE_USERNAME,
-            'password': VMWARE_PASSWORD
-        }
-        results = POST('/vmware/get_datastores/', payload)
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), list) is True, results.text
-
-    def test_03_verify_vmware_get_datastore_do_not_leak_password(request):
-        # use the VMWARE_PASSWORD constant imported from config above;
-        # os.environ may not carry this variable in the test runner
-        cmd = f"grep -R \"{VMWARE_PASSWORD}\" " \
-              "/var/log/middlewared.log"
-        results = SSH_TEST(cmd, user, password)
-        assert results['result'] is False, str(results['output'])
diff --git a/tests/api2/test_790_update.py b/tests/api2/test_790_update.py
deleted file mode 100644
index df365fdff47b7..0000000000000
--- a/tests/api2/test_790_update.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python3
-
-# Author: Eric Turgeon
-# License: BSD
-
-import pytest
-import sys
-import os
-from pytest_dependency import depends
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import GET, POST, SSH_TEST, vm_state, vm_start, ping_host
-from auto_config import vm_name, user, password, update
-from time import sleep
-from middlewared.test.integration.utils.client import truenas_server
-
-url = "https://raw.githubusercontent.com/iXsystems/ixbuild/master/prepnode/"
-
-if update:
-    def test_00_get_update_conf_for_internals_and_nightly(request):
-        version = GET("/system/info/").json()['version']
-        update_conf = 'truenas-update.conf'
-        fetch_cmd = f'fetch {url}{update_conf}'
-        mv_cmd = f'mv {update_conf} /data/update.conf'
-        if 'INTERNAL' in version:
-            results = SSH_TEST(fetch_cmd, user, password)
-            assert results['result'] is True, results['output']
-            results = SSH_TEST(mv_cmd, user, password)
-            assert results['result'] is True, results['output']
-        assert True
-
-    def test_01_get_initial_FreeNAS_version():
-        results = GET("/system/info/")
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), dict) is True, results.text
-        global initial_version
-        initial_version = results.json()['version']
-
-    def test_02_get_update_trains():
-        results = GET('/update/get_trains/')
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), dict) is True, results.text
-        global selected_trains
-        selected_trains = results.json()['selected']
-
-    @pytest.mark.dependency(name="update_03")
-    def test_03_check_available_update():
-        global update_version
-        results = POST('/update/check_available/')
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), dict) is True, results.text
-        assert results.json()['status'] == 'AVAILABLE', results.text
-        update_version = results.json()['version']
-
-    def test_04_update_get_pending(request):
-        depends(request, ["update_03"])
-        results = POST('/update/get_pending/')
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), list) is True, results.text
-        assert results.json() == [], results.text
-
-    def test_05_get_download_update(request):
-        depends(request, ["update_03"])
-        results = GET('/update/download/')
-        global JOB_ID
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), int) is True, results.text
-        JOB_ID = results.json()
-
-    @pytest.mark.dependency(name="update_06")
-    @pytest.mark.timeout(600)
-    def test_06_verify_the_update_download_is_successful(request):
-        depends(request, ["update_03"])
-        while True:
-            get_job = GET(f'/core/get_jobs/?id={JOB_ID}')
-            job_status = get_job.json()[0]
-            if job_status['state'] in ('FAILED', 'SUCCESS'):
-                break
-            # sleep between polls instead of hammering the jobs endpoint
-            sleep(5)
-        assert job_status['state'] == 'SUCCESS', get_job.text
-
-    def test_07_get_pending_update(request):
-        depends(request, ["update_03", "update_06"])
-        results = POST('/update/get_pending/')
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), list) is True, results.text
-        assert results.json() != [], results.text
-
-    def test_08_install_update(request):
-        depends(request, ["update_03", "update_06"])
-        global reboot
-        reboot = False
-        payload = {
-            "train": selected_trains,
-            "reboot": reboot
-        }
-        results = POST('/update/update/', payload)
-        global JOB_ID
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), int) is True, results.text
-        JOB_ID = results.json()
-
-    @pytest.mark.dependency(name="update_09")
-    @pytest.mark.timeout(600)
-    def test_09_verify_the_update_is_successful(request):
-        depends(request, ["update_03", "update_06"])
-        while True:
-            get_job = GET(f'/core/get_jobs/?id={JOB_ID}')
-            job_status = get_job.json()[0]
-            if job_status['state'] in ('FAILED', 'SUCCESS'):
-                # 'error' may be None on success, so guard the substring check
-                if 'Unable to downgrade' in (job_status['error'] or ''):
-                    pytest.skip('skipped due to downgrade')
-                break
-            sleep(5)
-        assert job_status['state'] == 'SUCCESS', get_job.text
-
-    @pytest.mark.dependency(name="update_10")
-    def test_10_verify_system_is_ready_to_reboot(request):
-        depends(request, ["update_03", "update_06", "update_09"])
-        results = POST('/update/check_available/')
-        assert results.status_code == 200, results.text
-        assert isinstance(results.json(), dict) is True, results.text
-        assert results.json()['status'] == 'REBOOT_REQUIRED', results.text
-
-    def test_11_wait_for_first_reboot_with_bhyve(request):
-        depends(request, ["update_03", "update_06", "update_09", "update_10"])
-        if reboot is False:
-            pytest.skip('Reboot is False skip')
-        else:
-            if vm_name is None:
-                pytest.skip('skip no vm_name')
-            else:
-                while vm_state(vm_name) != 'stopped':
-                    sleep(5)
-                assert vm_start(vm_name) is True
-        sleep(1)
-
-    def test_12_wait_for_second_reboot_with_bhyve(request):
-        depends(request, ["update_03", "update_06", "update_09", "update_10"])
-        if reboot is False:
-            pytest.skip('Reboot is False skip')
else: - if vm_name is None: - pytest.skip('skip no vm_name') - else: - while vm_state(vm_name) != 'stopped': - sleep(5) - assert vm_start(vm_name) is True - sleep(1) - - def test_13_wait_for_FreeNAS_to_be_online(request): - depends(request, ["update_03", "update_06", "update_09", "update_10"]) - if reboot is False: - pytest.skip('Reboot is False skip') - else: - while ping_host(truenas_server.ip, 1) is not True: - sleep(5) - assert ping_host(truenas_server.ip, 1) is True - sleep(10) - - def test_14_verify_initial_version_is_not_current_FreeNAS_version(request): - depends(request, ["update_03", "update_06", "update_09", "update_10"]) - if reboot is False: - pytest.skip('Reboot is False skip') - else: - global results, current_version - results = GET("/system/info/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), dict) is True, results.text - current_version = results.json()['version'] - assert initial_version != current_version, results.text - - def test_15_verify_update_version_is_current_version(request): - depends(request, ["update_03", "update_06", "update_09", "update_10"]) - if reboot is False: - pytest.skip('Reboot is False skip') - else: - assert update_version == current_version, results.text diff --git a/tests/api2/test_900_docs.py b/tests/api2/test_900_docs.py deleted file mode 100644 index a3a128f4520b6..0000000000000 --- a/tests/api2/test_900_docs.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import pytest -import sys -import os -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import SSH_TEST -from auto_config import user, password - - -def test_core_get_methods(request): - results = SSH_TEST("midclt call core.get_methods", user, password) - assert results['result'] is True, results diff --git a/tests/api2/test_999_pool_dataset_unlock.py b/tests/api2/test_999_pool_dataset_unlock.py deleted file mode 100644 index 9c19442683081..0000000000000 --- a/tests/api2/test_999_pool_dataset_unlock.py +++ /dev/null @@ -1,191 +0,0 @@ -import os -import sys -apifolder = os.getcwd() -sys.path.append(apifolder) - -import contextlib -import urllib.parse - -import pytest - -from auto_config import pool_name -from functions import POST, DELETE, wait_on_job -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.utils import ssh -from protocols import SMB -from samba import ntstatus, NTSTATUSError - - -SMB_PASSWORD = 'Abcd1234' -SMB_USER = 'smbuser999' - - -def passphrase_encryption(): - return { - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'passphrase', - }, - 'encryption': True, - 'inherit_encryption': False, - } - - -@contextlib.contextmanager -def dataset(name, options=None): - assert "/" not in name - - dataset = f"{pool_name}/{name}" - - result = POST("/pool/dataset/", {"name": dataset, **(options or {})}) - assert result.status_code == 200, result.text - - result = POST("/filesystem/setperm/", {'path': f"/mnt/{dataset}", "mode": "777"}) - assert result.status_code == 200, result.text - job_status = wait_on_job(result.json(), 180) - assert job_status["state"] == "SUCCESS", str(job_status["results"]) - - try: - yield dataset - finally: - result = DELETE(f"/pool/dataset/id/{urllib.parse.quote(dataset, '')}/") - assert result.status_code == 200, result.text - - -@contextlib.contextmanager -def smb_share(name, path, options=None): - results = 
POST("/sharing/smb/", { - "name": name, - "path": path, - "guestok": True, - **(options or {}), - }) - assert results.status_code == 200, results.text - id = results.json()["id"] - - try: - yield id - finally: - result = DELETE(f"/sharing/smb/id/{id}/") - assert result.status_code == 200, result.text - - -def lock_dataset(name): - payload = { - 'id': name, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def unlock_dataset(name, options=None): - payload = { - 'id': name, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': name, - 'passphrase': 'passphrase' - } - ], - **(options or {}), - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [name], str(job_status['results']) - - -@contextlib.contextmanager -def smb_connection(**kwargs): - c = SMB() - c.connect(**kwargs) - - try: - yield c - finally: - c.disconnect() - - -@pytest.fixture(scope='module') -def smb_user(): - with user({ - 'username': SMB_USER, - 'full_name': 'doug', - 'group_create': True, - 'password': SMB_PASSWORD, - 'smb': True - }, get_instance=True) as u: - yield u - - -@pytest.mark.dependency(name="create_dataset") -@pytest.mark.parametrize("toggle_attachments", [True, False]) -def test_pool_dataset_unlock_smb(smb_user, toggle_attachments): - # Prepare test SMB share - with dataset("normal") as normal: - with smb_share("normal", f"/mnt/{normal}"): - # Create an encrypted SMB share, unlocking which might lead to SMB service interruption - with dataset("encrypted", passphrase_encryption()) as encrypted: - with smb_share("encrypted", f"/mnt/{encrypted}"): - ssh(f"touch /mnt/{encrypted}/secret") - results = POST("/service/start/", {"service": "cifs"}) - assert results.status_code == 200, results.text - lock_dataset(encrypted) - # Mount test SMB share - with smb_connection( - share="normal", - username=SMB_USER, - password=SMB_PASSWORD - ) as normal_connection: - # Locked share should not be mountable - with pytest.raises(NTSTATUSError) as e: - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ): - pass - - assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME - - conn = normal_connection.show_connection() - assert conn['connected'], conn - unlock_dataset(encrypted, {"toggle_attachments": toggle_attachments}) - - conn = normal_connection.show_connection() - assert conn['connected'], conn - - if toggle_attachments: - # We should be able to mount encrypted share - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ) as encrypted_connection: - assert [x["name"] for x in encrypted_connection.ls("")] == ["secret"] - else: - # We should still not be able to mount encrypted share as we did not reload attachments - with pytest.raises(NTSTATUSError) as e: - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ): - pass - - assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME - results = POST("/service/stop/", {"service": "cifs"}) - assert results.status_code == 200, results.text diff --git 
a/tests/api2/test_account.py b/tests/api2/test_account.py deleted file mode 100644 index b22eeabb08646..0000000000000 --- a/tests/api2/test_account.py +++ /dev/null @@ -1,177 +0,0 @@ -import os -import sys - -import pytest - -from middlewared.test.integration.assets.account import user, group -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.utils import call, client -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -sys.path.append(os.getcwd()) -from functions import DELETE, POST, PUT - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_create_account_audit(api): - user_id = None - try: - with expect_audit_method_calls([{ - "method": "user.create", - "params": [ - { - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "********", - } - ], - "description": "Create user sergey", - }]): - payload = { - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "password", - } - if api == "ws": - user_id = call("user.create", payload) - elif api == "rest": - result = POST(f"/user/", payload) - assert result.status_code == 200, result.text - user_id = result.json() - else: - raise ValueError(api) - finally: - if user_id is not None: - call("user.delete", user_id) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_update_account_audit(api): - with user({ - "username": "user2", - "full_name": "user2", - "group_create": True, - "password": "test1234", - }) as u: - with expect_audit_method_calls([{ - "method": "user.update", - "params": [u["id"], {}], - "description": "Update user user2", - }]): - if api == "ws": - call("user.update", u["id"], {}) - elif api == "rest": - result = PUT(f"/user/id/{u['id']}", {}) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_delete_account_audit(api): - with user({ - "username": "user2", - "full_name": "user2", - "group_create": True, - "password": "test1234", - }) as u: - with expect_audit_method_calls([{ - "method": "user.delete", - "params": [u["id"], {}], - "description": "Delete user user2", - }]): - if api == "ws": - call("user.delete", u["id"], {}) - elif api == "rest": - result = DELETE(f"/user/id/{u['id']}") - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_create_group_audit(api): - group_id = None - try: - with expect_audit_method_calls([{ - "method": "group.create", - "params": [ - { - "name": "group2", - } - ], - "description": "Create group group2", - }]): - payload = { - "name": "group2", - } - if api == "ws": - group_id = call("group.create", payload) - elif api == "rest": - result = POST(f"/group/", payload) - assert result.status_code == 200, result.text - group_id = result.json() - else: - raise ValueError(api) - finally: - if group_id is not None: - call("group.delete", group_id) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_update_group_audit(api): - with group({ - "name": "group2", - }) as g: - with expect_audit_method_calls([{ - "method": "group.update", - "params": [g["id"], {}], - "description": "Update group group2", - }]): - if api == "ws": - call("group.update", g["id"], {}) - elif api == "rest": - result = PUT(f"/group/id/{g['id']}", {}) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - 
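-# The create/update/delete audit tests in this module all repeat the same
-# websocket-vs-REST dispatch. A minimal refactoring sketch follows, assuming
-# only the `call` and REST helpers (POST/PUT/DELETE) imported above; the
-# name `api_request` is hypothetical and is not used by the tests here.
-def api_request(api, ws_call, rest_call):
-    """Run the same operation over the chosen transport (sketch).
-
-    `ws_call` and `rest_call` are zero-argument closures, e.g.
-    `lambda: call("group.update", g["id"], {})` paired with
-    `lambda: PUT(f"/group/id/{g['id']}", {})`.
-    """
-    if api == "ws":
-        return ws_call()
-    elif api == "rest":
-        result = rest_call()
-        assert result.status_code == 200, result.text
-        return result.json()
-    else:
-        raise ValueError(api)
-
-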
-@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_delete_group_audit(api): - with group({ - "name": "group2", - }) as g: - with expect_audit_method_calls([{ - "method": "group.delete", - "params": [g["id"], {}], - "description": "Delete group group2", - }]): - if api == "ws": - call("group.delete", g["id"], {}) - elif api == "rest": - result = DELETE(f"/group/id/{g['id']}") - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -def test_update_account_using_api_key(): - with api_key([{"method": "CALL", "resource": "user.update"}]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - - c.call("user.update", 1, {}) - - -def test_update_account_using_token(): - token = call("auth.generate_token", 300) - - with client(auth=None) as c: - assert c.call("auth.login_with_token", token) - - c.call("user.update", 1, {}) diff --git a/tests/api2/test_account_duplicate_uid_gid.py b/tests/api2/test_account_duplicate_uid_gid.py deleted file mode 100644 index 42d96af268439..0000000000000 --- a/tests/api2/test_account_duplicate_uid_gid.py +++ /dev/null @@ -1,102 +0,0 @@ -import errno - -import pytest - -from middlewared.service_exception import ValidationErrors, ValidationError -from middlewared.test.integration.assets.account import user, group -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - - -@pytest.fixture(scope="module") -def uid_1234(): - with dataset(f"user1_homedir") as user1_homedir: - with user({ - "username": "user1", - "full_name": "user1", - "group_create": True, - "groups": [], - "home": f"/mnt/{user1_homedir}", - "password": "test1234", - "uid": 1234, - }) as uid_1234: - yield uid_1234 - - -@pytest.fixture(scope="module") -def gid_1234(): - with group({ - "name": "group1", - "gid": 1234, - }) as gid_1234: - yield gid_1234 - - -def test_create_duplicate_uid(uid_1234): - with dataset(f"user2_homedir") as user2_homedir: - with pytest.raises(ValidationErrors) as ve: - with user({ - "username": "user2", - "full_name": "user2", - "group_create": True, - "groups": [], - "home": f"/mnt/{user2_homedir}", - "password": "test1234", - "uid": 1234, - }): - pass - - assert ve.value.errors == [ - ValidationError('user_create.uid', 'Uid 1234 is already used (user user1 has it)', errno.EEXIST), - ] - - -def test_update_duplicate_uid(uid_1234): - with dataset(f"user2_homedir") as user2_homedir: - with user({ - "username": "user2", - "full_name": "user2", - "group_create": True, - "groups": [], - "home": f"/mnt/{user2_homedir}", - "password": "test1234", - }) as user2: - with pytest.raises(ValidationErrors) as ve: - call("user.update", user2["id"], {"uid": 1234}) - - assert ve.value.errors == [ - ValidationError('user_update.uid', 'Uid 1234 is already used (user user1 has it)', errno.EEXIST), - ] - - -def test_update_no_duplicate_uid(uid_1234): - call("user.update", uid_1234["id"], {"uid": 1234}) - - -def test_create_duplicate_gid(gid_1234): - with pytest.raises(ValidationErrors) as ve: - with group({ - "name": "group2", - "gid": 1234, - }): - pass - - assert ve.value.errors == [ - ValidationError('group_create.gid', 'Gid 1234 is already used (group group1 has it)', errno.EEXIST), - ] - - -def test_update_duplicate_gid(gid_1234): - with group({ - "name": "group2", - }) as group2: - with pytest.raises(ValidationErrors) as ve: - call("group.update", group2["id"], {"gid": 1234}) - - assert ve.value.errors == [ - ValidationError('group_update.gid', 'Gid 1234 is already 
used (group group1 has it)', errno.EEXIST), - ] - - -def test_update_no_duplicate_gid(gid_1234): - call("group.update", gid_1234["id"], {"gid": 1234}) diff --git a/tests/api2/test_account_idmap.py b/tests/api2/test_account_idmap.py deleted file mode 100644 index 746955600ea3c..0000000000000 --- a/tests/api2/test_account_idmap.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import sys - -import pytest - -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.utils import call, client - -LOCAL_USER_SID_PREFIX = 'S-1-22-1-' -LOCAL_GROUP_SID_PREFIX = 'S-1-22-2-' - -def test_uid_idmapping(): - with user({ - 'username': 'idmap_user', - 'full_name': 'idmap_user', - 'smb': True, - 'group_create': True, - 'password': 'test1234', - }) as u: - UNIX_SID = LOCAL_USER_SID_PREFIX + str(u['uid']) - results = call('idmap.convert_sids', [UNIX_SID]) - assert results['unmapped'] == {} - assert UNIX_SID in results['mapped'] - - entry = results['mapped'][UNIX_SID] - - assert entry['id_type'] == 'USER' - assert entry['id'] == u['uid'] - assert entry['name'] == 'Unix User\\idmap_user' - - results = call('idmap.convert_unixids', [{ - 'id_type': 'USER', - 'id': u['uid'], - }]) - - assert results['unmapped'] == {} - entry = results['mapped'][f'UID:{u["uid"]}'] - assert entry['id_type'] == 'USER' - assert entry['id'] == u['uid'] - pdb_sid = entry['sid'] - - user_obj = call('user.get_user_obj', {'uid': u['uid'], 'sid_info': True}) - assert 'sid' in user_obj - assert user_obj['sid'] == pdb_sid diff --git a/tests/api2/test_account_privilege.py b/tests/api2/test_account_privilege.py deleted file mode 100644 index c172488ad0bb7..0000000000000 --- a/tests/api2/test_account_privilege.py +++ /dev/null @@ -1,209 +0,0 @@ -import errno -import os -import sys -import types - -import pytest - -from middlewared.service_exception import CallError, ValidationErrors -from middlewared.test.integration.assets.account import group, privilege, root_with_password_disabled -from middlewared.test.integration.utils import call, mock -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -sys.path.append(os.getcwd()) -from functions import DELETE, POST, PUT - - -def test_change_local_administrator_groups_to_invalid(): - operator = call("group.query", [["group", "=", "operator"]], {"get": True}) - - with pytest.raises(ValidationErrors) as ve: - call("privilege.update", 1, {"local_groups": [operator["id"]]}) - - assert ve.value.errors[0].attribute == "privilege_update.local_groups" - - -def test_change_local_administrator_allowlist(): - with pytest.raises(ValidationErrors) as ve: - call("privilege.update", 1, {"allowlist": [{"method": "CALL", "resource": "system.info"}]}) - - assert ve.value.errors[0].attribute == "privilege_update.allowlist" - - -def test_change_local_administrator_roles(): - with pytest.raises(ValidationErrors) as ve: - call("privilege.update", 1, {"roles": ['READONLY_ADMIN']}) - - assert ve.value.errors[0].attribute == "privilege_update.roles" - - -def test_delete_local_administrator(): - with pytest.raises(CallError) as ve: - call("privilege.delete", 1) - - assert ve.value.errno == errno.EPERM - - -def test_invalid_local_group(): - with pytest.raises(ValidationErrors) as ve: - call("privilege.create", { - "name": "Test", - "local_groups": [1024], # invalid local group ID - "ds_groups": [], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, - }) - - assert ve.value.errors[0].attribute == "privilege_create.local_groups.0" - - 
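-# The tests in this module repeatedly reach into `ve.value.errors[0]` to
-# check which attribute failed validation. A small helper along these lines
-# could make that intent explicit; this is a sketch only, and
-# `assert_first_error_attribute` is a hypothetical name not called below.
-def assert_first_error_attribute(ve, attribute):
-    """Assert the first ValidationError in `ve` names `attribute` (sketch)."""
-    errors = ve.value.errors
-    assert errors, 'expected at least one validation error'
-    assert errors[0].attribute == attribute, errors
-
-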
-def test_delete_local_administrator_group(): - with group({ - "name": "test_local_admins", - }) as g: - local_groups = [lg["gid"] for lg in call("privilege.get_instance", 1)["local_groups"]] - call("privilege.update", 1, {"local_groups": local_groups + [g["gid"]]}) - - with pytest.raises(CallError) as ve: - call("group.delete", g["id"]) - - assert ve.value.errmsg.startswith("This group is used by privilege") - - call("privilege.update", 1, {"local_groups": local_groups}) - - -@pytest.fixture(scope="module") -def privilege_with_orphan_local_group(): - with group({ - "name": "test_orphan", - "smb": False, - }) as g: - gid = g["gid"] - privilege = call("privilege.create", { - "name": "Test orphan", - "local_groups": [gid], - "ds_groups": [], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, - }) - call("datastore.delete", "account.bsdgroups", g["id"]) - call("etc.generate", "user") - call("idmap.gencache.flush") - - yield types.SimpleNamespace(gid=gid, privilege=privilege) - - call("privilege.delete", privilege["id"]) - - -def test_create_group_with_orphan_privilege_gid(privilege_with_orphan_local_group): - with pytest.raises(ValidationErrors) as ve: - with group({ - "name": "test_orphan_duplicate", - "gid": privilege_with_orphan_local_group.gid, - }): - pass - - assert ve.value.errors[0].attribute == "group_create.gid" - assert ve.value.errors[0].errmsg.startswith("A privilege 'Test orphan' already uses this group ID.") - - -def test_group_next_gid(): - next_gid = call("group.get_next_gid") - with mock("privilege.used_local_gids", f""" - async def mock(self): - result = await self.used_local_gids() - result[{next_gid}] = None - return result - """): - assert call("group.get_next_gid") == next_gid + 1 - - -def test_remove_only_local_administrator_password_enabled_user(): - root = call("user.query", [["username", "=", "root"]], {"get": True}) - with pytest.raises(ValidationErrors) as ve: - call("user.update", root["id"], {"password_disabled": True}) - - assert ve.value.errors[0].attribute == "user_update.password_disabled" - assert ve.value.errors[0].errmsg == ( - "After disabling password for this user no password-enabled local user will have built-in privilege " - "'Local Administrator'." 
- ) - - -def test_password_disabled_root_is_a_local_administrator(): - with root_with_password_disabled(): - local_administrators = call("privilege.local_administrators") - - assert len(local_administrators) == 1 - assert local_administrators[0]["username"] == "root" - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_create_privilege_audit(api): - privilege = None - try: - with expect_audit_method_calls([{ - "method": "privilege.create", - "params": [ - { - "name": "Test", - "web_shell": False, - } - ], - "description": "Create privilege Test", - }]): - payload = { - "name": "Test", - "web_shell": False, - } - if api == "ws": - privilege = call("privilege.create", payload) - elif api == "rest": - result = POST(f"/privilege/", payload) - assert result.status_code == 200, result.text - privilege = result.json() - else: - raise ValueError(api) - finally: - if privilege is not None: - call("privilege.delete", privilege["id"]) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_update_privilege_audit(api): - with privilege({ - "name": "Test", - "web_shell": False, - }) as p: - with expect_audit_method_calls([{ - "method": "privilege.update", - "params": [p["id"], {}], - "description": "Update privilege Test", - }]): - if api == "ws": - call("privilege.update", p["id"], {}) - elif api == "rest": - result = PUT(f"/privilege/id/{p['id']}", {}) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize("api", ["ws", "rest"]) -def test_delete_privilege_audit(api): - with privilege({ - "name": "Test", - "web_shell": False, - }) as p: - with expect_audit_method_calls([{ - "method": "privilege.delete", - "params": [p["id"]], - "description": "Delete privilege Test", - }]): - if api == "ws": - call("privilege.delete", p["id"]) - elif api == "rest": - result = DELETE(f"/privilege/id/{p['id']}") - assert result.status_code == 200, result.text - else: - raise ValueError(api) diff --git a/tests/api2/test_account_privilege_authentication.py b/tests/api2/test_account_privilege_authentication.py deleted file mode 100644 index 90b74a049a4fa..0000000000000 --- a/tests/api2/test_account_privilege_authentication.py +++ /dev/null @@ -1,204 +0,0 @@ -import errno -import json -import logging - -import pytest -import websocket - -from truenas_api_client import ClientException -from middlewared.test.integration.assets.account import user, unprivileged_user as unprivileged_user_template -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, client, ssh, websocket_url -from middlewared.test.integration.utils.shell import assert_shell_works - -logger = logging.getLogger(__name__) - - -@pytest.fixture(scope="module") -def unprivileged_user(): - with unprivileged_user_template( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=[{"method": "CALL", "resource": "system.info"}], - web_shell=False, - ) as t: - yield t - - -@pytest.fixture() -def unprivileged_user_token(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - return c.call("auth.generate_token", 300, {}, True) - - -@pytest.fixture(scope="module") -def unprivileged_user_with_web_shell(): - with unprivileged_user_template( - username="unprivilegedws", - group_name="unprivileged_users_ws", - privilege_name="Unprivileged users with web shell", - allowlist=[], - web_shell=True, - ) as t: - yield t - - -@pytest.fixture() -def 
unprivileged_user_with_web_shell_token(unprivileged_user_with_web_shell): - with client(auth=(unprivileged_user_with_web_shell.username, unprivileged_user_with_web_shell.password)) as c: - return c.call("auth.generate_token", 300, {}, True) - - -def test_libpam_auth(unprivileged_user): - pam_resp = call('auth.libpam_authenticate', unprivileged_user.username, unprivileged_user.password) - assert pam_resp['code'] == 0 - assert pam_resp['reason'] == 'Success' - - pam_resp = call('auth.libpam_authenticate', unprivileged_user.username, 'CANARY') - assert pam_resp['code'] == 7 - assert pam_resp['reason'] == 'Authentication failure' - - -def test_websocket_auth_session_list_terminate(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - sessions = call("auth.sessions") - my_sessions = [ - s for s in sessions - if s["credentials"] == "LOGIN_PASSWORD" and s["credentials_data"]["username"] == unprivileged_user.username - ] - assert len(my_sessions) == 1, sessions - - call("auth.terminate_session", my_sessions[0]["id"]) - - with pytest.raises(Exception): - c.call("system.info") - - sessions = call("auth.sessions") - assert not [ - s for s in sessions - if s["credentials"] == "LOGIN_PASSWORD" and s["credentials_data"]["username"] == unprivileged_user.username - ], sessions - - -def test_websocket_auth_terminate_all_other_sessions(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - call("auth.terminate_other_sessions") - - with pytest.raises(Exception): - c.call("system.info") - - -def test_websocket_auth_get_methods(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - methods = c.call("core.get_methods") - - assert "system.info" in methods - assert "pool.create" not in methods - - -def test_websocket_auth_calls_allowed_method(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - c.call("system.info") - - -def test_websocket_auth_fails_to_call_forbidden_method(unprivileged_user): - with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c: - with pytest.raises(ClientException) as ve: - c.call("pool.create") - - assert ve.value.errno == errno.EACCES - - -def test_unix_socket_auth_get_methods(unprivileged_user): - methods = json.loads(ssh(f"sudo -u {unprivileged_user.username} midclt call core.get_methods")) - - assert "system.info" in methods - assert "pool.create" not in methods - - -def test_unix_socket_auth_calls_allowed_method(unprivileged_user): - ssh(f"sudo -u {unprivileged_user.username} midclt call system.info") - - -def test_unix_socket_auth_fails_to_call_forbidden_method(unprivileged_user): - result = ssh(f"sudo -u {unprivileged_user.username} midclt call pool.create", check=False, complete_response=True) - assert "Not authorized" in result["stderr"] - - -def test_unix_socket_auth_fails_when_user_has_no_privilege(): - with dataset(f"noconnect_homedir") as homedir: - with user({ - "username": "noconnect", - "full_name": "Noconnect", - "group_create": True, - "groups": [], - "home": f"/mnt/{homedir}", - "password": "test1234", - }): - result = ssh(f"sudo -u noconnect midclt call pool.create", check=False, complete_response=True) - assert "Not authenticated" in result["stderr"] - - -def test_token_auth_session_list_terminate(unprivileged_user, unprivileged_user_token): - with client(auth=None) as c: - assert c.call("auth.login_with_token", unprivileged_user_token) - - 
sessions = call("auth.sessions") - my_sessions = [ - s for s in sessions - if ( - s["credentials"] == "TOKEN" and - s["credentials_data"]["parent"]["credentials"] == "LOGIN_PASSWORD" and - s["credentials_data"]["parent"]["credentials_data"]["username"] == unprivileged_user.username - ) - ] - assert len(my_sessions) == 1, sessions - - call("auth.terminate_session", my_sessions[0]["id"]) - - with pytest.raises(Exception): - c.call("system.info") - - -def test_token_auth_calls_allowed_method(unprivileged_user_token): - with client(auth=None) as c: - assert c.call("auth.login_with_token", unprivileged_user_token) - - c.call("system.info") - - -def test_token_auth_fails_to_call_forbidden_method(unprivileged_user_token): - with client(auth=None) as c: - assert c.call("auth.login_with_token", unprivileged_user_token) - - with pytest.raises(ClientException) as ve: - c.call("pool.create") - - assert ve.value.errno == errno.EACCES - - -def test_drop_privileges(unprivileged_user_token): - with client() as c: - # This should drop privileges for the current root session - assert c.call("auth.login_with_token", unprivileged_user_token) - - with pytest.raises(ClientException) as ve: - c.call("pool.create") - - assert ve.value.errno == errno.EACCES - - -def test_token_auth_working_not_working_web_shell(unprivileged_user_token): - ws = websocket.create_connection(websocket_url() + "/websocket/shell") - try: - ws.send(json.dumps({"token": unprivileged_user_token})) - resp_opcode, msg = ws.recv_data() - assert json.loads(msg.decode())["msg"] == "failed" - finally: - ws.close() - - -@pytest.mark.timeout(30) -def test_token_auth_working_web_shell(unprivileged_user_with_web_shell_token): - assert_shell_works(unprivileged_user_with_web_shell_token, "unprivilegedws") diff --git a/tests/api2/test_account_privilege_role.py b/tests/api2/test_account_privilege_role.py deleted file mode 100644 index 3f5e46fd6dd6c..0000000000000 --- a/tests/api2/test_account_privilege_role.py +++ /dev/null @@ -1,187 +0,0 @@ -import errno -import logging - -import pytest - -from truenas_api_client import ClientException -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.pool import dataset, snapshot -from middlewared.test.integration.utils import client -from time import sleep - -logger = logging.getLogger(__name__) - - -@pytest.mark.parametrize("role", ["SNAPSHOT_READ", "SNAPSHOT_WRITE"]) -def test_can_read_with_read_or_write_role(role): - with dataset("test_snapshot_read") as ds: - with snapshot(ds, "test"): - with unprivileged_user_client([role]) as c: - assert len(c.call("zfs.snapshot.query", [["dataset", "=", ds]])) == 1 - - -def test_can_not_write_with_read_role(): - with dataset("test_snapshot_write1") as ds: - with unprivileged_user_client(["SNAPSHOT_READ"]) as c: - with pytest.raises(ClientException) as ve: - c.call("zfs.snapshot.create", { - "dataset": ds, - "name": "test", - }) - - assert ve.value.errno == errno.EACCES - - -def test_write_with_write_role(): - with dataset("test_snapshot_write2") as ds: - with unprivileged_user_client(["SNAPSHOT_WRITE"]) as c: - c.call("zfs.snapshot.create", { - "dataset": ds, - "name": "test", - }) - - -def test_can_delete_with_write_role_with_separate_delete(): - with dataset("test_snapshot_delete1") as ds: - with snapshot(ds, "test") as id: - with unprivileged_user_client(["SNAPSHOT_DELETE"]) as c: - c.call("zfs.snapshot.delete", id) - - -def test_can_not_delete_with_write_role_with_separate_delete(): - with 
dataset("test_snapshot_delete2") as ds: - with snapshot(ds, "test") as id: - with unprivileged_user_client(["SNAPSHOT_WRITE"]) as c: - with pytest.raises(ClientException) as ve: - c.call("zfs.snapshot.delete", id) - - assert ve.value.errno == errno.EACCES - - -def test_works_for_redefined_crud_method(): - with unprivileged_user_client(["SHARING_ADMIN"]) as c: - c.call("service.update", "cifs", {"enable": False}) - - -def test_full_admin_role(): - with unprivileged_user_client(["FULL_ADMIN"]) as c: - c.call("system.general.config") - - # User with FULL_ADMIN role should have something in jobs list - assert len(c.call("core.get_jobs")) != 0 - - # attempt to wait / cancel job should not fail - jid = c.call("core.job_test", {"sleep": 1}) - - c.call("core.job_wait", jid, job=True) - - c.call("core.job_abort", jid) - - -@pytest.mark.parametrize("role,method,params", [ - ("DATASET_READ", "pool.dataset.checksum_choices", []), -]) -def test_read_role_can_call_method(role, method, params): - with unprivileged_user_client([role]) as c: - c.call(method, *params) - - -@pytest.mark.parametrize("method,params", [ - ("system.general.config", []), - ("user.get_instance", [1]), - ("user.query", []), - ("user.shell_choices", []), - ("auth.me", []), - ("filesystem.listdir", ["/"]), - ("filesystem.stat", ["/"]), - ("filesystem.getacl", ["/"]), - ("filesystem.acltemplate.by_path", [{"path": "/"}]), - ("pool.dataset.details", []), - ("core.get_jobs", []), -]) -def test_readonly_can_call_method(method, params): - with unprivileged_user_client(["READONLY_ADMIN"]) as c: - c.call(method, *params) - - -def test_readonly_can_not_call_method(): - with unprivileged_user_client(["READONLY_ADMIN"]) as c: - with pytest.raises(ClientException) as ve: - c.call("user.create") - - assert ve.value.errno == errno.EACCES - - with pytest.raises(ClientException) as ve: - # fails with EPERM if API access granted - c.call("filesystem.mkdir", "/foo") - - assert ve.value.errno == errno.EACCES - - -def test_limited_user_can_set_own_attributes(): - with unprivileged_user_client(["READONLY_ADMIN"]) as c: - c.call("auth.set_attribute", "foo", "bar") - attrs = c.call("auth.me")["attributes"] - assert "foo" in attrs - assert attrs["foo"] == "bar" - - -def test_limited_user_auth_token_behavior(): - with unprivileged_user_client(["READONLY_ADMIN"]) as c: - auth_token = c.call("auth.generate_token") - - with client(auth=None) as c2: - assert c2.call("auth.login_with_token", auth_token) - c2.call("auth.me") - c2.call("core.get_jobs") - - -def test_sharing_manager_jobs(): - with unprivileged_user_client(["SHARING_ADMIN"]) as c: - auth_token = c.call("auth.generate_token") - jid = c.call("core.job_test", {"sleep": 1}) - - with client(auth=None) as c2: - #c.call("core.job_wait", jid, job=True) - assert c2.call("auth.login_with_token", auth_token) - wait_job_id = c2.call("core.job_wait", jid) - sleep(2) - result = c2.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True}) - assert result["state"] == "SUCCESS" - c2.call("core.job_abort", wait_job_id) - - -def test_foreign_job_access(): - with unprivileged_user_client(["READONLY_ADMIN"]) as unprivileged: - with client() as c: - job = c.call("core.job_test") - - wait_job_id = unprivileged.call("core.job_wait", job) - sleep(2) - result = unprivileged.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True}) - assert result["state"] != "SUCCESS" - - jobs = unprivileged.call("core.get_jobs", [["id", "=", job]]) - assert jobs == [] - - with unprivileged_user_client(["FULL_ADMIN"]) as 
unprivileged: - with client() as c: - job = c.call("core.job_test") - - wait_job_id = unprivileged.call("core.job_wait", job) - sleep(2) - result = unprivileged.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True}) - assert result["state"] == "SUCCESS" - - -def test_can_not_subscribe_to_event(): - with unprivileged_user_client() as unprivileged: - with pytest.raises(ValueError) as ve: - unprivileged.subscribe("alert.list", lambda *args, **kwargs: None) - - assert ve.value.args[0]["errname"] == "EACCES" - - -def test_can_subscribe_to_event(): - with unprivileged_user_client(["READONLY_ADMIN"]) as unprivileged: - unprivileged.subscribe("alert.list", lambda *args, **kwargs: None) diff --git a/tests/api2/test_account_privilege_role_forbidden_fields.py b/tests/api2/test_account_privilege_role_forbidden_fields.py deleted file mode 100644 index 1bd34eb517946..0000000000000 --- a/tests/api2/test_account_privilege_role_forbidden_fields.py +++ /dev/null @@ -1,57 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.cloud_sync import local_ftp_credential -from middlewared.test.integration.assets.pool import dataset - - -@pytest.fixture(scope="module") -def unprivileged_client(): - with unprivileged_user_client(["CLOUD_BACKUP_WRITE", "CLOUD_SYNC_WRITE"]) as c: - yield c - - -@pytest.fixture(scope="module") -def cloudsync_template(): - with local_ftp_credential() as credential: - with dataset("cloud_backup") as local_dataset: - yield { - "path": f"/mnt/{local_dataset}", - "credentials": credential["id"], - "attributes": { - "folder": "", - }, - } - - -@pytest.mark.parametrize("param,value", [ - ("pre_script", "rm -rf /"), - ("post_script", "rm -rf /"), -]) -def test_cloud_backup(unprivileged_client, cloudsync_template, param, value): - with pytest.raises(ValidationErrors) as ve: - unprivileged_client.call("cloud_backup.create", { - **cloudsync_template, - "password": "test", - "keep_last": 10, - param: value, - }) - - assert any(error.attribute == f"cloud_backup_create.{param}" for error in ve.value.errors), ve - - -@pytest.mark.parametrize("param,value", [ - ("pre_script", "rm -rf /"), - ("post_script", "rm -rf /"), -]) -def test_cloud_sync(unprivileged_client, cloudsync_template, param, value): - with pytest.raises(ValidationErrors) as ve: - unprivileged_client.call("cloudsync.create", { - **cloudsync_template, - "direction": "PUSH", - "transfer_mode": "COPY", - param: value, - }) - - assert any(error.attribute == f"cloud_sync_create.{param}" for error in ve.value.errors), ve diff --git a/tests/api2/test_account_privilege_role_private_fields.py b/tests/api2/test_account_privilege_role_private_fields.py deleted file mode 100644 index b1decf1bc31ed..0000000000000 --- a/tests/api2/test_account_privilege_role_private_fields.py +++ /dev/null @@ -1,235 +0,0 @@ -import contextlib - -import pytest - -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.assets.cloud_backup import task as cloud_backup_task -from middlewared.test.integration.assets.cloud_sync import local_ftp_credential, local_ftp_task -from middlewared.test.integration.assets.crypto import root_certificate_authority -from middlewared.test.integration.assets.datastore import row -from middlewared.test.integration.assets.keychain import ssh_keypair -from 
middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, client, mock - -REDACTED = "********" - - -@pytest.fixture(scope="module") -def readonly_client(): - with unprivileged_user_client(["READONLY_ADMIN"]) as c: - yield c - - -@contextlib.contextmanager -def wrap(id): - yield id - - -@contextlib.contextmanager -def certificateauthority(): - with root_certificate_authority("ca_test") as ca: - yield ca["id"] - - -@contextlib.contextmanager -def cloudbackup(): - with local_ftp_credential() as credential: - with dataset("cloud_backup") as local_dataset: - with mock("cloud_backup.ensure_initialized", return_value=None): - with cloud_backup_task({ - "path": f"/mnt/{local_dataset}", - "credentials": credential["id"], - "attributes": { - "folder": "", - }, - "password": "test", - }) as task: - yield task["id"] - - -@contextlib.contextmanager -def cloudsync_credential(): - with local_ftp_credential() as credential: - yield credential["id"] - - -@contextlib.contextmanager -def cloudsync(): - with local_ftp_task() as task: - yield task["id"] - - -@contextlib.contextmanager -def disk(): - disks = call("disk.query") - yield disks[0]["identifier"] - - -@contextlib.contextmanager -def dns_authenticator(): - with row( - "system.acmednsauthenticator", - { - "authenticator": "cloudflare", - "name": "test", - "attributes": { - "api_key": "key", - "api_token": "token", - }, - }, - ) as id: - yield id - - -@contextlib.contextmanager -def idmap(): - with row( - "directoryservice.idmap_domain", - { - "name": "test", - "dns_domain_name": "test", - "range_low": 1000, - "range_high": 1001, - "idmap_backend": "LDAP", - "options": { - "ldap_base_dn": "cn=BASEDN", - "ldap_user_dn": "cn=USERDN", - "ldap_url": "ldap://127.0.0.1", - "ldap_user_dn_password": "password" - }, - }, - {"prefix": "idmap_domain_"}, - ) as id: - yield id - - -@contextlib.contextmanager -def vm_device(): - with row( - "vm.vm", - { - "id": 5, - "name": "", - "memory": 225 - }): - with row( - "vm.device", - { - "id": 7, - "dtype": "DISPLAY", - "vm": 5, - "attributes": { - "bind": "127.0.0.1", - "port": 1, - "web_port": 1, - "password": "pass", - } - } - ) as id: - yield id - - -@contextlib.contextmanager -def iscsi_auth(): - auth = call("iscsi.auth.create", { - "tag": 1, - "user": "test", - "secret": "secretsecret", - "peeruser": "peeruser", - "peersecret": "peersecretsecret", - }) - try: - yield auth["id"] - finally: - call("iscsi.auth.delete", auth["id"]) - - -@contextlib.contextmanager -def keychaincredential(): - with ssh_keypair() as k: - yield k["id"] - - -@contextlib.contextmanager -def vmware(): - with row( - "storage.vmwareplugin", - { - "password": "password", - }, - ) as id: - yield id - - -@pytest.mark.parametrize("how", ["multiple", "single", "get_instance"]) -@pytest.mark.parametrize("service,id,options,redacted_fields", ( - ("acme.dns.authenticator", dns_authenticator, {}, ["attributes"]), - ("certificate", 1, {}, ["privatekey", "issuer"]), - ("certificateauthority", certificateauthority, {}, ["privatekey", "issuer"]), - ("cloud_backup", cloudbackup, {}, ["credentials.attributes", "password"]), - ("cloudsync.credentials", cloudsync_credential, {}, ["attributes"]), - ("cloudsync", cloudsync, {}, ["credentials.attributes", "encryption_password"]), - ("disk", disk, {"extra": {"passwords": True}}, ["passwd"]), - ("idmap", idmap, {}, ["options.ldap_user_dn_password"]), - ("iscsi.auth", iscsi_auth, {}, ["secret", "peersecret"]), - ("keychaincredential", keychaincredential, {}, 
["attributes"]), - ("user", 1, {}, ["unixhash", "smbhash"]), - ("vmware", vmware, {}, ["password"]), - ("vm.device", vm_device, {}, ["attributes.password"]), -)) -def test_crud(readonly_client, how, service, id, options, redacted_fields): - identifier = "id" if service != "disk" else "identifier" - - with (id() if callable(id) else wrap(id)) as id: - if how == "multiple": - result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], options)[0] - elif how == "single": - result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], {**options, "get": True}) - elif how == "get_instance": - result = readonly_client.call(f"{service}.get_instance", id, options) - else: - assert False - - for k in redacted_fields: - obj = result - for path in k.split("."): - obj = obj[path] - - assert obj == REDACTED, (k, obj, REDACTED) - - -@pytest.mark.parametrize("service,redacted_fields", ( - ("system.general", ["ui_certificate"]), - ("ldap", ["bindpw"]), - ("mail", ["pass", "oauth"]), - ("snmp", ["v3_password", "v3_privpassphrase"]), - ("truecommand", ["api_key"]), -)) -def test_config(readonly_client, service, redacted_fields): - result = readonly_client.call(f"{service}.config") - - for k in redacted_fields: - assert result[k] == REDACTED - - -def test_fields_are_visible_if_has_write_access(): - with unprivileged_user_client(["ACCOUNT_WRITE"]) as c: - result = c.call("user.get_instance", 1) - - assert result["unixhash"] != REDACTED - - -def test_fields_are_visible_for_api_key(): - with api_key([{"method": "CALL", "resource": "user.get_instance"}]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - result = c.call("user.get_instance", 1) - - assert result["unixhash"] != REDACTED - - -def test_vm_display_device(readonly_client): - with vm_device(): - result = readonly_client.call("vm.get_display_devices", 5) - assert result[0]["attributes"]["password"] == REDACTED diff --git a/tests/api2/test_account_query_roles.py b/tests/api2/test_account_query_roles.py deleted file mode 100644 index e1321031ae3f8..0000000000000 --- a/tests/api2/test_account_query_roles.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.account import unprivileged_user_client - - -@pytest.mark.parametrize("role", ["READONLY_ADMIN", "FULL_ADMIN"]) -def test_user_role_in_account(role): - with unprivileged_user_client(roles=[role]) as c: - this_user = c.call("user.query", [["username", "=", c.username]], {"get": True}) - - assert this_user['roles'] == [role] - - -def test_user_role_full_admin_map(): - with unprivileged_user_client(allowlist=[{"method": "*", "resource": "*"}]) as c: - this_user = c.call("user.query", [["username", "=", c.username]], {"get": True}) - - assert "FULL_ADMIN" in this_user["roles"] - assert "HAS_ALLOW_LIST" in this_user["roles"] diff --git a/tests/api2/test_account_root_password.py b/tests/api2/test_account_root_password.py deleted file mode 100644 index de7b4c7178dd2..0000000000000 --- a/tests/api2/test_account_root_password.py +++ /dev/null @@ -1,61 +0,0 @@ -import pytest - -from truenas_api_client import ClientException -from middlewared.test.integration.utils import call, client -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.pool import dataset - - -def test_root_password_disabled(): - with client() as c: - root_user_id = c.call( - "datastore.query", - "account.bsdusers", - [["username", "=", "root"]], - {"get": True, "prefix": "bsdusr_"}, - )["id"] 
- - c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": True}) - c.call("etc.generate", "user") - try: - alerts = c.call("alert.list") - assert any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts - - builtin_administrators_group_id = c.call( - "datastore.query", - "account.bsdgroups", - [["group", "=", "builtin_administrators"]], - {"get": True, "prefix": "bsdgrp_"}, - )["id"] - - with dataset(f"admin_homedir") as homedir: - events = [] - - def callback(type, **message): - events.append((type, message)) - - c.subscribe("user.web_ui_login_disabled", callback, sync=True) - - with user({ - "username": "admin", - "full_name": "Admin", - "group_create": True, - "groups": [builtin_administrators_group_id], - "home": f"/mnt/{homedir}", - "password": "test1234", - }, get_instance=False): - alerts = c.call("alert.list") - assert not any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts - - # Root should not be able to log in with password anymore - with pytest.raises(ClientException): - call("system.info", client_kwargs=dict(auth_required=False)) - - assert events[0][1]["fields"]["usernames"] == ["admin"] - - c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False}) - c.call("etc.generate", "user") - finally: - # In case of a failure - c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False}) - c.call("etc.generate", "user") diff --git a/tests/api2/test_account_shell_choices.py b/tests/api2/test_account_shell_choices.py deleted file mode 100644 index 29ede61eae80a..0000000000000 --- a/tests/api2/test_account_shell_choices.py +++ /dev/null @@ -1,135 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import group, user -from middlewared.test.integration.utils import call - - -def test_shell_choices_has_no_privileges(): - with group({ - "name": "test_no_privileges", - }) as g: - assert "/usr/bin/cli" not in call("user.shell_choices", [g["id"]]) - - -def test_shell_choices_has_privileges(): - with group({ - "name": "test_has_privileges", - }) as g: - privilege = call("privilege.create", { - "name": "Test", - "local_groups": [g["gid"]], - "ds_groups": [], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, - }) - try: - assert "/usr/bin/cli" in call("user.shell_choices", [g["id"]]) - finally: - call("privilege.delete", privilege["id"]) - - -@pytest.mark.parametrize("group_payload", [ - lambda g: {"group": g["id"]}, - lambda g: {"group_create": True, "groups": [g["id"]]}, -]) -def test_cant_create_user_with_cli_shell_without_privileges(group_payload): - with group({ - "name": "test_no_privileges", - }) as g: - with pytest.raises(ValidationErrors) as ve: - with user({ - "username": "test", - "full_name": "Test", - "home": f"/nonexistent", - "password": "test1234", - "shell": "/usr/bin/cli", - **group_payload(g), - }): - pass - - assert ve.value.errors[0].attribute == "user_create.shell" - - -@pytest.mark.parametrize("group_payload", [ - lambda g: {"group": g["id"]}, - lambda g: {"group_create": True, "groups": [g["id"]]}, -]) -def test_can_create_user_with_cli_shell_with_privileges(group_payload): - with group({ - "name": "test_no_privileges", - }) as g: - privilege = call("privilege.create", { - "name": "Test", - "local_groups": [g["gid"]], - "ds_groups": [], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, 
- }) - try: - with user({ - "username": "test", - "full_name": "Test", - "home": f"/nonexistent", - "password": "test1234", - "shell": "/usr/bin/cli", - **group_payload(g), - }): - pass - finally: - call("privilege.delete", privilege["id"]) - - -@pytest.mark.parametrize("group_payload", [ - lambda g: {"group": g["id"]}, - lambda g: {"groups": [g["id"]]}, -]) -def test_cant_update_user_with_cli_shell_without_privileges(group_payload): - with group({ - "name": "test_no_privileges", - }) as g: - with user({ - "username": "test", - "full_name": "Test", - "home": f"/nonexistent", - "password": "test1234", - "group_create": True, - }) as u: - with pytest.raises(ValidationErrors) as ve: - call("user.update", u["id"], { - "shell": "/usr/bin/cli", - **group_payload(g), - }) - - assert ve.value.errors[0].attribute == "user_update.shell" - - -@pytest.mark.parametrize("group_payload", [ - lambda g: {"group": g["id"]}, - lambda g: {"groups": [g["id"]]}, -]) -def test_can_update_user_with_cli_shell_with_privileges(group_payload): - with group({ - "name": "test_no_privileges", - }) as g: - privilege = call("privilege.create", { - "name": "Test", - "local_groups": [g["gid"]], - "ds_groups": [], - "allowlist": [{"method": "CALL", "resource": "system.info"}], - "web_shell": False, - }) - try: - with user({ - "username": "test", - "full_name": "Test", - "home": f"/nonexistent", - "password": "test1234", - "group_create": True, - }) as u: - call("user.update", u["id"], { - "shell": "/usr/bin/cli", - **group_payload(g), - }) - finally: - call("privilege.delete", privilege["id"]) diff --git a/tests/api2/test_account_ssh_key.py b/tests/api2/test_account_ssh_key.py deleted file mode 100644 index c55698e58331a..0000000000000 --- a/tests/api2/test_account_ssh_key.py +++ /dev/null @@ -1,79 +0,0 @@ -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh - - -def test_account_create_update_ssh_key_in_existing_dir(): - with dataset("home") as ds: - homedir = f"/mnt/{ds}" - with user({ - "username": "test", - "full_name": "Test", - "home": homedir, - "password": "test1234", - "group_create": True, - "sshpubkey": "old", - }) as u: - call("user.delete", u["id"]) - - with user({ - "username": "test", - "full_name": "Test", - "home": homedir, - "password": "test1234", - "group_create": True, - "sshpubkey": "new", - }) as u: - u = call("user.get_instance", u["id"]) - assert u["sshpubkey"] == "new" - - -def test_account_update_ssh_key_and_set_homedir(): - with dataset("home") as ds: - homedir = f"/mnt/{ds}" - - with user({ - "username": "test", - "full_name": "Test", - "password": "test1234", - "group_create": True, - }) as u: - call("user.update", u["id"], { - "home": homedir, - "sshpubkey": "new", - }) - - u = call("user.get_instance", u["id"]) - assert u["sshpubkey"] == "new" - - -def test_account_sets_ssh_key_on_user_create(): - with dataset("home") as ds: - homedir = f"/mnt/{ds}" - - with user({ - "username": "test", - "full_name": "Test", - "home": homedir, - "password": "test1234", - "group_create": True, - "sshpubkey": "old", - }): - assert ssh(f"cat {homedir}/test/.ssh/authorized_keys") == "old\n" - - -def test_account_delete_ssh_key_on_user_delete(): - with dataset("home") as ds: - homedir = f"/mnt/{ds}" - - with user({ - "username": "test", - "full_name": "Test", - "home": homedir, - "password": "test1234", - "group_create": True, - "sshpubkey": "old", - }) as u: - call("user.delete", 
u["id"]) - - assert ssh(f"cat {homedir}/test/.ssh/authorized_keys", check=False) == "" diff --git a/tests/api2/test_alert_classes.py b/tests/api2/test_alert_classes.py deleted file mode 100644 index d631a7b6d7b94..0000000000000 --- a/tests/api2/test_alert_classes.py +++ /dev/null @@ -1,59 +0,0 @@ -from unittest.mock import ANY - -import pytest -from pytest_dependency import depends - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.utils import call - - - -def test__normal_alert_class(): - value = { - "classes": { - "UPSBatteryLow": { - "level": "CRITICAL", - "policy": "IMMEDIATELY", - }, - }, - } - - call("alertclasses.update", value) - - assert call("alertclasses.config") == {"id": ANY, **value} - - -def test__nonexisting_alert_class(): - with pytest.raises(ValidationErrors) as ve: - call("alertclasses.update", { - "classes": { - "Invalid": { - "level": "WARNING", - }, - }, - }) - - assert ve.value.errors[0].attribute == "alert_class_update.classes.Invalid" - - -def test__disable_proactive_support_for_valid_alert_class(request): - call("alertclasses.update", { - "classes": { - "ZpoolCapacityNotice": { - "proactive_support": False, - }, - }, - }) - - -def test__disable_proactive_support_for_invalid_alert_class(request): - with pytest.raises(ValidationErrors) as ve: - call("alertclasses.update", { - "classes": { - "UPSBatteryLow": { - "proactive_support": False, - }, - }, - }) - - assert ve.value.errors[0].attribute == "alert_class_update.classes.UPSBatteryLow.proactive_support" diff --git a/tests/api2/test_api_key.py b/tests/api2/test_api_key.py deleted file mode 100644 index 7fce2b058a05e..0000000000000 --- a/tests/api2/test_api_key.py +++ /dev/null @@ -1,133 +0,0 @@ -import contextlib -import os - -import pytest - -import sys -sys.path.append(os.getcwd()) -from functions import POST, GET, DELETE, SSH_TEST -from auto_config import password, user as user_ - -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.utils import call, client -from middlewared.test.integration.utils.client import truenas_server - - -@contextlib.contextmanager -def user(): - results = POST("/user/", { - "username": "testuser", - "full_name": "Test User", - "group_create": True, - "password": "test1234", - }) - assert results.status_code == 200, results.text - id = results.json() - - try: - yield - finally: - results = DELETE(f"/user/id/{id}/") - assert results.status_code == 200, results.text - - -def test_root_api_key_websocket(): - """We should be able to call a method with root API key using Websocket.""" - ip = truenas_server.ip - with api_key([{"method": "*", "resource": "*"}]) as key: - with user(): - cmd = f"sudo -u testuser midclt -u ws://{ip}/websocket --api-key {key} call system.info" - results = SSH_TEST(cmd, user_, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - assert 'uptime' in str(results['stdout']) - - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - - # root-level API key should be able to start / stop services - c.call("service.start", "cifs") - c.call("service.stop", "cifs") - - # root-level API key should be able to enable / disable services - c.call("service.update", "cifs", {"enable": True}) - c.call("service.update", "cifs", {"enable": False}) - - -def test_allowed_api_key_websocket(): - """We should be able to call a method with API key that allows that call using Websocket.""" - ip = truenas_server.ip - with 
api_key([{"method": "CALL", "resource": "system.info"}]) as key: - with user(): - cmd = f"sudo -u testuser midclt -u ws://{ip}/websocket --api-key {key} call system.info" - results = SSH_TEST(cmd, user_, password) - assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}' - assert 'uptime' in str(results['stdout']) - - -def test_denied_api_key_websocket(): - """We should not be able to call a method with API key that does not allow that call using Websocket.""" - ip = truenas_server.ip - with api_key([{"method": "CALL", "resource": "system.info_"}]) as key: - with user(): - cmd = f"sudo -u testuser midclt -u ws://{ip}/websocket --api-key {key} call system.info" - results = SSH_TEST(cmd, user_, password) - assert results['result'] is False - - -def test_denied_api_key_noauthz(): - with api_key([{"method": "CALL", "resource": "system.info"}]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - - # verify API key works as expected - c.call('system.info') - - # system.product_type has no_authz_required - # this should fail do to lack of authorization for - # API key - with pytest.raises(Exception): - c.call("system.version") - - with pytest.raises(Exception): - c.call("service.start", "cifs") - - with pytest.raises(Exception): - c.call("service.update", "cifs", {"enable": True}) - - auth_token = c.call("auth.generate_token") - - with client(auth=None) as c: - assert c.call("auth.login_with_token", auth_token) - - # verify that token has same access rights - c.call('system.info') - - with pytest.raises(Exception): - c.call("system.version") - - with pytest.raises(Exception): - c.call("service.start", "cifs") - - with pytest.raises(Exception): - c.call("service.update", "cifs", {"enable": True}) - - -def test_api_key_auth_session_list_terminate(): - with api_key([{"method": "CALL", "resource": "system.info"}]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - - sessions = call("auth.sessions") - my_sessions = [ - s for s in sessions - if ( - s["credentials"] == "API_KEY" and - s["credentials_data"]["api_key"]["name"] == "Test API Key" - ) - ] - assert len(my_sessions) == 1, sessions - - call("auth.terminate_session", my_sessions[0]["id"]) - - with pytest.raises(Exception): - c.call("system.info") diff --git a/tests/api2/test_api_key_crud.py b/tests/api2/test_api_key_crud.py deleted file mode 100644 index a79a41524b981..0000000000000 --- a/tests/api2/test_api_key_crud.py +++ /dev/null @@ -1,49 +0,0 @@ -import contextlib - -from middlewared.test.integration.utils import call, client - - -@contextlib.contextmanager -def api_key(allowlist): - key = call("api_key.create", {"name": "Test API Key", "allowlist": allowlist}) - try: - yield key - finally: - call("api_key.delete", key["id"]) - - -def test_has_key_after_creation_but_not_read(): - key = call("api_key.create", {"name": "Test", "allowlist": []}) - try: - assert "key" in key - - instance = call("api_key.get_instance", key["id"]) - assert "key" not in instance - - update = call("api_key.update", key["id"], {}) - assert "key" not in update - finally: - call("api_key.delete", key["id"]) - - -def test_api_key_reset(): - with api_key([]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key["key"]) - - updated = call("api_key.update", key["id"], {"reset": True}) - - with client(auth=None) as c: - assert not c.call("auth.login_with_api_key", key["key"]) - - with client(auth=None) as c: - assert 
c.call("auth.login_with_api_key", updated["key"]) - - -def test_api_key_delete(): - with api_key([]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key["key"]) - - with client(auth=None) as c: - assert not c.call("auth.login_with_api_key", key["key"]) diff --git a/tests/api2/test_attachment_querying.py b/tests/api2/test_attachment_querying.py deleted file mode 100644 index 348af8f390a2e..0000000000000 --- a/tests/api2/test_attachment_querying.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from pytest_dependency import depends - -sys.path.append(os.getcwd()) - -from middlewared.test.integration.assets.nfs import nfs_share -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, client - - -PARENT_DATASET = 'test_parent' -CHILD_DATASET = f'{PARENT_DATASET}/child_dataset' - - -def test_attachment_with_child_path(request): - with dataset(PARENT_DATASET) as parent_dataset: - parent_path = f'/mnt/{parent_dataset}' - assert call('pool.dataset.attachments_with_path', parent_path) == [] - - with nfs_share(parent_dataset): - attachments = call('pool.dataset.attachments_with_path', parent_path) - assert len(attachments) > 0, attachments - assert attachments[0]['type'] == 'NFS Share', attachments - - with dataset(CHILD_DATASET) as child_dataset: - child_path = f'/mnt/{child_dataset}' - attachments = call('pool.dataset.attachments_with_path', child_path) - assert len(attachments) == 0, attachments - - attachments = call('pool.dataset.attachments_with_path', child_path, True) - assert len(attachments) == 1, attachments - assert attachments[0]['type'] == 'NFS Share', attachments diff --git a/tests/api2/test_audit_alerts.py b/tests/api2/test_audit_alerts.py deleted file mode 100644 index ac1d64a9c9b85..0000000000000 --- a/tests/api2/test_audit_alerts.py +++ /dev/null @@ -1,123 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import call, ssh, mock -from time import sleep - - -@pytest.fixture(scope='function') -def setup_state(request): - """ - Parametrize the test setup - The hope was that both 'backend' and 'setup' one-shot tests would be similar, however - the 'setup' test ended up requiring 'with mock' - """ - path = '/audit' - alert_key = request.param[0] - if alert_key is not None: - path += f"/{alert_key}.db" - alert_class = request.param[1] - restore_data = None - try: - # Remove any pre-existing alert cruft - call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key}) - - alerts = call("alert.list") - class_alerts = [alert for alert in alerts if alert['klass'] == alert_class] - assert len(class_alerts) == 0, class_alerts - match alert_class: - case 'AuditBackendSetup': - # A file in the dataset: set it immutable - ssh(f'chattr +i {path}') - lsattr = ssh(f'lsattr {path}') - assert lsattr[4] == 'i', lsattr - restore_data = path - case 'AuditDatasetCleanup': - # Directly tweak the zfs settings - call( - "zfs.dataset.update", - "boot-pool/ROOT/24.10.0-MASTER-20240709-021413/audit", - {"properties": {"org.freenas:refquota_warning": {"parsed": "70"}}} - ) - case _: - pass - yield request.param - finally: - match alert_class: - case 'AuditBackendSetup': - # Remove immutable flag from file - assert restore_data != "" - ssh(f'chattr -i {restore_data}') - lsattr = ssh(f'lsattr {restore_data}') - assert lsattr[4] == '-', lsattr - # Restore backend file descriptors and dismiss alerts - call('auditbackend.setup') - case 
'AuditSetup': - # Dismiss alerts - call('audit.setup') - case _: - pass - # call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key}) - sleep(1) - alerts = call("alert.list") - class_alerts = [alert for alert in alerts if alert['klass'] == alert_class] - assert len(class_alerts) == 0, class_alerts - - -@pytest.mark.parametrize( - 'setup_state', [ - ['SMB', 'AuditBackendSetup', 'auditbackend.setup'], - ], - indirect=True -) -def test_audit_backend_alert(setup_state): - db_path, alert_class, audit_method = setup_state - call(audit_method) - sleep(1) - alerts = call("alert.list") - class_alerts = [alert for alert in alerts if alert['klass'] == alert_class] - assert len(class_alerts) > 0, class_alerts - assert class_alerts[0]['klass'] == 'AuditBackendSetup', class_alerts - assert class_alerts[0]['args']['service'] == db_path, class_alerts - assert class_alerts[0]['formatted'].startswith("Audit service failed backend setup"), class_alerts - - -@pytest.mark.parametrize( - 'setup_state', [ - [None, 'AuditSetup', 'audit.setup'] - ], - indirect=True -) -def test_audit_setup_alert(setup_state): - with mock("audit.update_audit_dataset", """ - from middlewared.service import private - @private - async def mock(self, new): - raise Exception() - """): - unused, alert_class, audit_method = setup_state - call(audit_method) - sleep(1) - alerts = call("alert.list") - class_alerts = [alert for alert in alerts if alert['klass'] == alert_class] - assert len(class_alerts) > 0, class_alerts - assert class_alerts[0]['klass'] == 'AuditSetup', class_alerts - assert class_alerts[0]['formatted'].startswith("Audit service failed to complete setup"), class_alerts - - -def test_audit_health_monitor_alert(): - with mock("auditbackend.query", """ - from middlewared.service import private - from middlewared.schema import accepts, List, Dict, Str - @private - @accepts( - Str('db_name', required=True), - List('query-filters'), - Dict('query-options', additional_attrs=True) - ) - async def mock(self, db_name, filters, options): - raise CallError('TEST_SERVICE: connection to audit database is not initialized.') - """): - alert = call("alert.run_source", "AuditServiceHealth")[0] - assert alert['source'] == 'AuditServiceHealth', f"Received source: {alert['source']}" - assert alert['text'].startswith("Failed to perform audit query"), f"Received text: {alert['text']}" - assert "connection to audit database is not initialized" in alert['args']['verrs'], f"Received args: {alert['args']}" diff --git a/tests/api2/test_audit_api_key.py b/tests/api2/test_audit_api_key.py deleted file mode 100644 index 0d2611c641f43..0000000000000 --- a/tests/api2/test_audit_api_key.py +++ /dev/null @@ -1,34 +0,0 @@ -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -API_KEY_NAME = 'AUDIT_API_KEY' - - -def test_api_key_audit(): - payload = {'name': API_KEY_NAME, 'allowlist': [{'resource': '*', 'method': '*'}]} - payload2 = {'allowlist': []} - api_key_id = None - - try: - with expect_audit_method_calls([{ - 'method': 'api_key.create', - 'params': [payload], - 'description': f'Create API key {API_KEY_NAME}', - }]): - api_key_id = call('api_key.create', payload)['id'] - - with expect_audit_method_calls([{ - 'method': 'api_key.update', - 'params': [api_key_id, payload2], - 'description': f'Update API key {API_KEY_NAME}', - }]): - call('api_key.update', api_key_id, payload2) - - finally: - if api_key_id: - with
expect_audit_method_calls([{ - 'method': 'api_key.delete', - 'params': [api_key_id], - 'description': f'Delete API key {API_KEY_NAME}', - }]): - call('api_key.delete', api_key_id) diff --git a/tests/api2/test_audit_audit.py b/tests/api2/test_audit_audit.py deleted file mode 100644 index 9447d5f8bf40d..0000000000000 --- a/tests/api2/test_audit_audit.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -import sys - -import operator -import pytest -import requests -import time -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.utils import call, url -from middlewared.test.integration.utils.audit import expect_audit_log, expect_audit_method_calls -from unittest.mock import ANY - -sys.path.append(os.getcwd()) -from functions import POST, PUT - - -# ===================================================================== -# Fixtures and utilities -# ===================================================================== -@pytest.fixture(scope='class') -def report_exists(request): - report_pathname = request.config.cache.get('report_pathname', None) - assert report_pathname is not None - yield report_pathname - - -# ===================================================================== -# Tests -# ===================================================================== -@pytest.mark.parametrize('payload,success', [ - ({'retention': 20}, True), - ({'retention': 0}, False) -]) -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_audit_config_audit(api, payload, success): - ''' - Test the auditing of Audit configuration changes - ''' - initial_audit_config = call('audit.config') - protocol = 'WEBSOCKET' if api == 'ws' else 'REST' - rest_operator = operator.eq if success else operator.ne - expected_log_template = { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": protocol, - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "audit.update", - "params": [payload], - "description": "Update Audit Configuration", - }, - "success": success - } - try: - with expect_audit_log([expected_log_template]): - if api == 'ws': - if success: - call('audit.update', payload) - else: - with pytest.raises(ValidationErrors): - call('audit.update', payload) - elif api == 'rest': - result = PUT('/audit/', payload) - assert rest_operator(result.status_code, 200), result.text - else: - raise ValueError(api) - finally: - # Restore initial state - restore_payload = { - 'retention': initial_audit_config['retention'], - } - if api == 'ws': - call('audit.update', restore_payload) - elif api == 'rest': - result = PUT('/audit/', restore_payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_audit_export_audit(request, api): - ''' - Test the auditing of the audit export function - ''' - payload = { - 'export_format': 'CSV' - } - with expect_audit_method_calls([{ - 'method': 'audit.export', - 'params': [payload], - 'description': 'Export Audit Data', - }]): - if api == 'ws': - report_pathname = call('audit.export', payload, job=True) - request.config.cache.set('report_pathname', report_pathname) - elif api == 'rest': - results = POST("/audit/export/", payload) - assert results.status_code == 200, results.text - else: - raise ValueError(api) - - -class TestAuditDownload: - """ - Wrap these tests in a 
class for the 'report_exists' fixture - """ - @pytest.mark.parametrize('api', ['ws', 'rest']) - def test_audit_download_audit(self, report_exists, api): - ''' - Test the auditing of the audit download function - ''' - report_pathname = report_exists - st = call('filesystem.stat', report_pathname) - - init_audit_query = call("audit.query", { - "query-filters": [["event_data.method", "=", "audit.download_report"]], - "query-options": {"select": ["event_data", "success"]} - }) - init_len = len(init_audit_query) - - report_name = os.path.basename(report_pathname) - payload = { - 'report_name': report_name - } - if api == 'ws': - job_id, download_data = call( - 'core.download', 'audit.download_report', [payload], 'report.csv' - ) - r = requests.get(f"{url()}{download_data}") - r.raise_for_status() - assert len(r.content) == st['size'] - elif api == 'rest': - results = POST("/audit/download_report/", payload) - assert results.status_code == 200, results.text - else: - raise ValueError(api) - - post_audit_query = call("audit.query", { - "query-filters": [["event_data.method", "=", "audit.download_report"]], - "query-options": {"select": ["event_data", "success"]} - }) - post_len = len(post_audit_query) - - # This usually requires only one cycle - count_down = 10 - while count_down > 0 and post_len == init_len: - time.sleep(1) - count_down -= 1 - post_audit_query = call("audit.query", { - "query-filters": [["event_data.method", "=", "audit.download_report"]], - "query-options": {"select": ["event_data", "success"]} - }) - post_len = len(post_audit_query) - - assert count_down > 0, "Timed out waiting for the audit entry" - assert post_len > init_len - - # Confirm this download is recorded - entry = post_audit_query[-1] - event_data = entry['event_data'] - params = event_data['params'][0] - assert report_name in params['report_name'] diff --git a/tests/api2/test_audit_basic.py b/tests/api2/test_audit_basic.py deleted file mode 100644 index 5c68b59efc010..0000000000000 --- a/tests/api2/test_audit_basic.py +++ /dev/null @@ -1,252 +0,0 @@ -from middlewared.test.integration.assets.account import user, unprivileged_user_client -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.utils import call, url -from protocols import smb_connection -from time import sleep - -import os -import pytest -import requests -import secrets -import string - - -SMBUSER = 'audit-smb-user' -PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -AUDIT_DATASET_CONFIG = { - # keyname : "audit"=audit only setting, "zfs"=zfs dataset setting, "ro"=read-only (not a setting) - 'retention': 'audit', - 'quota': 'zfs', - 'reservation': 'zfs', - 'quota_fill_warning': 'zfs', - 'quota_fill_critical': 'zfs', - 'remote_logging_enabled': 'other', - 'space': 'ro' -} -MiB = 1024**2 -GiB = 1024**3 - - -# ===================================================================== -# Fixtures and utilities -# ===================================================================== -class AUDIT_CONFIG(): - defaults = { - 'retention': 7, - 'quota': 0, - 'reservation': 0, - 'quota_fill_warning': 75, - 'quota_fill_critical': 95 - } - - -# def get_zfs(key, zfs_config): -def get_zfs(data_type, key, zfs_config): - """ Get the equivalent ZFS value associated with the audit config setting """ - - types = { - 'zfs': { - 'reservation': zfs_config['properties']['refreservation']['parsed'] or 0, - 'quota': 
zfs_config['properties']['refquota']['parsed'] or 0, # audit quota == ZFS refquota - 'refquota': zfs_config['properties']['refquota']['parsed'] or 0, - 'quota_fill_warning': zfs_config['org.freenas:quota_warning'], - 'quota_fill_critical': zfs_config['org.freenas:quota_critical'] - }, - 'space': { - 'used': zfs_config['properties']['used']['parsed'], - 'used_by_snapshots': zfs_config['properties']['usedbysnapshots']['parsed'], - 'available': zfs_config['properties']['available']['parsed'], - 'used_by_dataset': zfs_config['properties']['usedbydataset']['parsed'], - # We set 'refreservation' and there is no 'usedbyreservation' - 'used_by_reservation': zfs_config['properties']['usedbyrefreservation']['parsed'] - } - } - # return zfs[key] - return types[data_type][key] - - -@pytest.fixture(scope='class') -def initialize_for_smb_tests(): - with dataset('audit-test-basic', data={'share_type': 'SMB'}) as ds: - with smb_share(os.path.join('/mnt', ds), 'AUDIT_BASIC_TEST', { - 'purpose': 'NO_PRESET', - 'guestok': False, - 'audit': {'enable': True} - }) as s: - with user({ - 'username': SMBUSER, - 'full_name': SMBUSER, - 'group_create': True, - 'password': PASSWD, - 'smb': True - }) as u: - yield {'dataset': ds, 'share': s, 'user': u} - - -@pytest.fixture(scope='class') -def init_audit(): - """ Provides the audit and dataset configs and cleans up afterward """ - try: - dataset = call('audit.get_audit_dataset') - config = call('audit.config') - yield (config, dataset) - finally: - call('audit.update', AUDIT_CONFIG.defaults) - - -# ===================================================================== -# Tests -# ===================================================================== -class TestAuditConfig: - def test_audit_config_defaults(self, init_audit): - (config, dataset) = init_audit - - # Confirm existence of config entries - for key in [k for k in AUDIT_DATASET_CONFIG]: - assert key in config, str(config) - - # Confirm audit default config settings - assert config['retention'] == AUDIT_CONFIG.defaults['retention'] - assert config['quota'] == AUDIT_CONFIG.defaults['quota'] - assert config['reservation'] == AUDIT_CONFIG.defaults['reservation'] - assert config['quota_fill_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning'] - assert config['quota_fill_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical'] - assert config['remote_logging_enabled'] is False - for key in ['used', 'used_by_snapshots', 'used_by_dataset', 'used_by_reservation', 'available']: - assert key in config['space'], str(config['space']) - - for service in ['MIDDLEWARE', 'SMB', 'SUDO']: - assert service in config['enabled_services'] - - # Confirm audit dataset settings - for key in [k for k in AUDIT_DATASET_CONFIG if AUDIT_DATASET_CONFIG[k] == 'zfs']: - assert get_zfs('zfs', key, dataset) == config[key], f"config[{key}] = {config[key]}" - - def test_audit_config_dataset_defaults(self, init_audit): - """ Confirm Audit dataset uses Audit default settings """ - (unused, ds_config) = init_audit - assert ds_config['org.freenas:refquota_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning'] - assert ds_config['org.freenas:refquota_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical'] - - def test_audit_config_updates(self): - """ - This test validates that setting values has expected results. 
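- Quota and reservation values are configured in GiB while the underlying ZFS - properties are reported in bytes, so the comparisons below convert with - integer division by GiB.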
- """ - new_config = call('audit.update', {'retention': 10}) - assert new_config['retention'] == 10 - - # quota are in units of GiB - new_config = call('audit.update', {'quota': 1}) - assert new_config['quota'] == 1 - audit_dataset = call('audit.get_audit_dataset') - - # ZFS value is in units of bytes. Convert to GiB for comparison. - assert get_zfs('zfs', 'refquota', audit_dataset) // GiB == new_config['quota'] - - # Confirm ZFS and audit config are in sync - assert new_config['space']['available'] == get_zfs('space', 'available', audit_dataset) - assert new_config['space']['used_by_dataset'] == get_zfs('space', 'used', audit_dataset) - - # Check that we're actually setting the quota by evaluating available space - # Change the the quota to something more interesting - new_config = call('audit.update', {'quota': 2}) - assert new_config['quota'] == 2 - - audit_dataset = call('audit.get_audit_dataset') - assert get_zfs('zfs', 'refquota', audit_dataset) == 2*GiB # noqa (allow 2*GiB) - - used_in_dataset = get_zfs('space', 'used_by_dataset', audit_dataset) - assert 2*GiB - new_config['space']['available'] == used_in_dataset # noqa (allow 2*GiB) - - new_config = call('audit.update', {'reservation': 1}) - assert new_config['reservation'] == 1 - assert new_config['space']['used_by_reservation'] != 0 - - new_config = call('audit.update', { - 'quota_fill_warning': 70, - 'quota_fill_critical': 80 - }) - - assert new_config['quota_fill_warning'] == 70 - assert new_config['quota_fill_critical'] == 80 - - # Test disable reservation - new_config = call('audit.update', {'reservation': 0}) - assert new_config['reservation'] == 0 - - # Test disable quota - new_config = call('audit.update', {'quota': 0}) - assert new_config['quota'] == 0 - - -class TestAuditOps: - def test_audit_query(self, initialize_for_smb_tests): - # If this test has been run more than once on this VM, then - # the audit DB _will_ record the creation. - # Let's get the starting count. 
- initial_ops_count = call('audit.query', { - 'services': ['SMB'], - 'query-filters': [['username', '=', SMBUSER]], - 'query-options': {'count': True} - }) - - share = initialize_for_smb_tests['share'] - with smb_connection( - share=share['name'], - username=SMBUSER, - password=PASSWD, - ) as c: - fd = c.create_file('testfile.txt', 'w') - for i in range(0, 3): - c.write(fd, b'foo') - c.read(fd, 0, 3) - c.close(fd, True) - - retries = 2 - ops_count = initial_ops_count - while retries > 0 and (ops_count - initial_ops_count) <= 0: - sleep(5) - ops_count = call('audit.query', { - 'services': ['SMB'], - 'query-filters': [['username', '=', SMBUSER]], - 'query-options': {'count': True} - }) - retries -= 1 - assert ops_count > initial_ops_count, f"retries remaining = {retries}" - - def test_audit_export(self): - for backend in ['CSV', 'JSON', 'YAML']: - report_path = call('audit.export', {'export_format': backend}, job=True) - assert report_path.startswith('/audit/reports/root/') - st = call('filesystem.stat', report_path) - assert st['size'] != 0, str(st) - - job_id, path = call( - "core.download", "audit.download_report", - [{"report_name": os.path.basename(report_path)}], - f"report.{backend.lower()}" - ) - r = requests.get(f"{url()}{path}") - r.raise_for_status() - assert len(r.content) == st['size'] - - def test_audit_export_nonroot(self): - with unprivileged_user_client(roles=['SYSTEM_AUDIT_READ', 'FILESYSTEM_ATTRS_READ']) as c: - me = c.call('auth.me') - username = me['pw_name'] - - for backend in ['CSV', 'JSON', 'YAML']: - report_path = c.call('audit.export', {'export_format': backend}, job=True) - assert report_path.startswith(f'/audit/reports/{username}/') - st = c.call('filesystem.stat', report_path) - assert st['size'] != 0, str(st) - - job_id, path = c.call( - "core.download", "audit.download_report", - [{"report_name": os.path.basename(report_path)}], - f"report.{backend.lower()}" - ) - r = requests.get(f"{url()}{path}") - r.raise_for_status() - assert len(r.content) == st['size'] diff --git a/tests/api2/test_audit_dataset.py b/tests/api2/test_audit_dataset.py deleted file mode 100644 index 57f32e6d368a1..0000000000000 --- a/tests/api2/test_audit_dataset.py +++ /dev/null @@ -1,31 +0,0 @@ -from middlewared.test.integration.utils import call, pool -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -DS_NAME = f'{pool}/audit_dataset_insert_name_here' - - -def test_dataset_audit(): - payload = {'name': DS_NAME} - - try: - with expect_audit_method_calls([{ - 'method': 'pool.dataset.create', - 'params': [payload], - 'description': f'Pool dataset create {DS_NAME}', - }]): - call('pool.dataset.create', payload) - - with expect_audit_method_calls([{ - 'method': 'pool.dataset.update', - 'params': [DS_NAME, {'atime': 'OFF'}], - 'description': f'Pool dataset update {DS_NAME}', - }]): - call('pool.dataset.update', DS_NAME, {'atime': 'OFF'}) - - finally: - with expect_audit_method_calls([{ - 'method': 'pool.dataset.delete', - 'params': [DS_NAME], - 'description': f'Pool dataset delete {DS_NAME}', - }]): - call('pool.dataset.delete', DS_NAME) diff --git a/tests/api2/test_audit_ftp.py b/tests/api2/test_audit_ftp.py deleted file mode 100644 index 14075d5c27535..0000000000000 --- a/tests/api2/test_audit_ftp.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import sys - -import pytest -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls 
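- # expect_audit_method_calls asserts that audit entries matching the given - # method/params/description templates are recorded while the block runs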
- -sys.path.append(os.getcwd()) -from functions import PUT - - -@pytest.fixture(scope='module') -def nfs_audit_dataset(request): - with dataset('audit-test-nfs') as ds: - try: - yield ds - finally: - pass - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_ftp_config_audit(api): - ''' - Test the auditing of FTP configuration changes - ''' - initial_ftp_config = call('ftp.config') - try: - # UPDATE - payload = { - 'clients': 1000, - 'banner': "Hello, from New York" - } - with expect_audit_method_calls([{ - 'method': 'ftp.update', - 'params': [payload], - 'description': 'Update FTP configuration', - }]): - if api == 'ws': - call('ftp.update', payload) - elif api == 'rest': - result = PUT('/ftp/', payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - finally: - # Restore initial state - restore_payload = { - 'clients': initial_ftp_config['clients'], - 'banner': initial_ftp_config['banner'] - } - if api == 'ws': - call('ftp.update', restore_payload) - elif api == 'rest': - result = PUT('/ftp/', restore_payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) diff --git a/tests/api2/test_audit_iscsi.py b/tests/api2/test_audit_iscsi.py deleted file mode 100644 index 07914f8120989..0000000000000 --- a/tests/api2/test_audit_iscsi.py +++ /dev/null @@ -1,576 +0,0 @@ -import os -import sys - -import pytest -from middlewared.test.integration.assets.iscsi import (iscsi_extent, - iscsi_target) -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -sys.path.append(os.getcwd()) -from functions import DELETE, POST, PUT - -REDACTED_SECRET = '********' -MB = 1024 * 1024 -MB_100 = 100 * MB -DEFAULT_ISCSI_PORT = 3260 - - -@pytest.fixture(scope='module') -def initialize_zvol_for_iscsi_audit_tests(request): - with dataset('audit-test-iscsi') as ds: - zvol = f'{ds}/zvol' - payload = { - 'name': zvol, - 'type': 'VOLUME', - 'volsize': MB_100, - 'volblocksize': '16K' - } - zvol_config = call('pool.dataset.create', payload) - try: - yield zvol - finally: - call('pool.dataset.delete', zvol_config['id']) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_auth_audit(api): - auth_config = None - tag = 1 - user1 = 'someuser1' - user2 = 'someuser2' - password1 = 'somepassword123' - password2 = 'newpassword1234' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.auth.create', - 'params': [ - { - 'tag': tag, - 'user': user1, - 'secret': REDACTED_SECRET, - } - ], - 'description': f'Create iSCSI Authorized Access {user1} ({tag})', - }]): - payload = { - 'tag': tag, - 'user': user1, - 'secret': password1, - } - if api == 'ws': - auth_config = call('iscsi.auth.create', payload) - elif api == 'rest': - result = POST('/iscsi/auth/', payload) - assert result.status_code == 200, result.text - auth_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.auth.update', - 'params': [ - auth_config['id'], - { - 'user': user2, - 'secret': REDACTED_SECRET, - }], - 'description': f'Update iSCSI Authorized Access {user1} ({tag})', - }]): - payload = { - 'user': user2, - 'secret': password2, - } - if api == 'ws': - auth_config = call('iscsi.auth.update', auth_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/auth/id/{auth_config["id"]}', payload) - assert result.status_code == 200, result.text - 
auth_config = result.json() - else: - raise ValueError(api) - finally: - if auth_config is not None: - # DELETE - id_ = auth_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.auth.delete', - 'params': [id_], - 'description': f'Delete iSCSI Authorized Access {user2} ({tag})', - }]): - if api == 'ws': - call('iscsi.auth.delete', id_) - elif api == 'rest': - result = DELETE(f'/iscsi/auth/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_extent_audit(api, initialize_zvol_for_iscsi_audit_tests): - extent_name1 = 'extent1' - extent_name2 = 'extent2' - disk = f'zvol/{initialize_zvol_for_iscsi_audit_tests}' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.extent.create', - 'params': [ - { - 'type': 'DISK', - 'disk': disk, - 'name': extent_name1, - } - ], - 'description': f'Create iSCSI extent {extent_name1}', - }]): - payload = { - 'type': 'DISK', - 'disk': disk, - 'name': extent_name1, - } - if api == 'ws': - extent_config = call('iscsi.extent.create', payload) - elif api == 'rest': - result = POST('/iscsi/extent/', payload) - assert result.status_code == 200, result.text - extent_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.extent.update', - 'params': [ - extent_config['id'], - { - 'name': extent_name2, - }], - 'description': f'Update iSCSI extent {extent_name1}', - }]): - payload = { - 'name': extent_name2, - } - if api == 'ws': - extent_config = call('iscsi.extent.update', extent_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/extent/id/{extent_config["id"]}', payload) - assert result.status_code == 200, result.text - extent_config = result.json() - else: - raise ValueError(api) - finally: - if extent_config is not None: - # DELETE - id_ = extent_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.extent.delete', - 'params': [id_], - 'description': f'Delete iSCSI extent {extent_name2}', - }]): - if api == 'ws': - call('iscsi.extent.delete', id_) - elif api == 'rest': - result = DELETE(f'/iscsi/extent/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_global_audit(api): - global_config = None - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.global.update', - 'params': [ - { - 'alua': True, - 'listen_port': 13260, - } - ], - 'description': 'Update iSCSI', - }]): - payload = { - 'alua': True, - 'listen_port': 13260, - } - if api == 'ws': - global_config = call('iscsi.global.update', payload) - elif api == 'rest': - result = PUT('/iscsi/global/', payload) - assert result.status_code == 200, result.text - global_config = result.json() - else: - raise ValueError(api) - finally: - if global_config is not None: - payload = { - 'alua': False, - 'listen_port': DEFAULT_ISCSI_PORT, - } - if api == 'ws': - global_config = call('iscsi.global.update', payload) - elif api == 'rest': - result = PUT('/iscsi/global/', payload) - assert result.status_code == 200, result.text - global_config = result.json() - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_host_audit(api): - host_config = None - ip = '1.2.3.4' - iqn = 'iqn.1993-08.org.debian:01:1234567890' - description = 'Development VM (debian)' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 
'iscsi.host.create', - 'params': [ - { - 'ip': ip, - 'iqns': [iqn], - } - ], - 'description': f'Create iSCSI host {ip}', - }]): - payload = { - 'ip': ip, - 'iqns': [iqn], - } - if api == 'ws': - host_config = call('iscsi.host.create', payload) - elif api == 'rest': - result = POST('/iscsi/host/', payload) - assert result.status_code == 200, result.text - host_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.host.update', - 'params': [ - host_config['id'], - { - 'description': description, - }], - 'description': f'Update iSCSI host {ip}', - }]): - payload = { - 'description': description, - } - if api == 'ws': - host_config = call('iscsi.host.update', host_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/host/id/{host_config["id"]}', payload) - assert result.status_code == 200, result.text - host_config = result.json() - else: - raise ValueError(api) - finally: - if host_config is not None: - # DELETE - id_ = host_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.host.delete', - 'params': [id_], - 'description': f'Delete iSCSI host {ip}', - }]): - if api == 'ws': - call('iscsi.host.delete', id_) - elif api == 'rest': - result = DELETE(f'/iscsi/host/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_initiator_audit(api): - initiator_config = None - comment = f'Default initiator ({api})' - comment2 = f'INITIATOR ({api})' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.initiator.create', - 'params': [ - { - 'comment': comment, - 'initiators': [], - } - ], - 'description': f'Create iSCSI initiator {comment}', - }]): - payload = { - 'comment': comment, - 'initiators': [], - } - if api == 'ws': - initiator_config = call('iscsi.initiator.create', payload) - elif api == 'rest': - result = POST('/iscsi/initiator/', payload) - assert result.status_code == 200, result.text - initiator_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.initiator.update', - 'params': [ - initiator_config['id'], - { - 'comment': comment2, - 'initiators': ['1.2.3.4', '5.6.7.8'], - }], - 'description': f'Update iSCSI initiator {comment}', - }]): - payload = { - 'comment': comment2, - 'initiators': ['1.2.3.4', '5.6.7.8'], - } - if api == 'ws': - initiator_config = call('iscsi.initiator.update', initiator_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/initiator/id/{initiator_config["id"]}', payload) - assert result.status_code == 200, result.text - initiator_config = result.json() - else: - raise ValueError(api) - finally: - if initiator_config is not None: - # DELETE - id_ = initiator_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.initiator.delete', - 'params': [id_], - 'description': f'Delete iSCSI initiator {comment2}', - }]): - if api == 'ws': - call('iscsi.initiator.delete', id_) - elif api == 'rest': - result = DELETE(f'/iscsi/initiator/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_portal_audit(api): - portal_config = None - comment = f'Default portal ({api})' - comment2 = f'PORTAL ({api})' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.portal.create', - 'params': [ - { - 'listen': [{'ip': '0.0.0.0'}], - 'comment': comment, - 
'discovery_authmethod': 'NONE', - } - ], - 'description': f'Create iSCSI portal {comment}', - }]): - payload = { - 'listen': [{'ip': '0.0.0.0'}], - 'comment': comment, - 'discovery_authmethod': 'NONE', - } - if api == 'ws': - portal_config = call('iscsi.portal.create', payload) - elif api == 'rest': - result = POST('/iscsi/portal/', payload) - assert result.status_code == 200, result.text - portal_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.portal.update', - 'params': [ - portal_config['id'], - { - 'comment': comment2, - }], - 'description': f'Update iSCSI portal {comment}', - }]): - payload = { - 'comment': comment2, - } - if api == 'ws': - portal_config = call('iscsi.portal.update', portal_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/portal/id/{portal_config["id"]}', payload) - assert result.status_code == 200, result.text - portal_config = result.json() - else: - raise ValueError(api) - finally: - if portal_config is not None: - # DELETE - id_ = portal_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.portal.delete', - 'params': [id_], - 'description': f'Delete iSCSI portal {comment2}', - }]): - if api == 'ws': - call('iscsi.portal.delete', id_) - elif api == 'rest': - result = DELETE(f'/iscsi/portal/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_target_audit(api): - target_config = None - target_name = 'target1' - target_alias1 = f'target1 alias ({api})' - target_alias2 = f'Updated target1 alias ({api})' - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.target.create', - 'params': [ - { - 'name': target_name, - 'alias': target_alias1, - } - ], - 'description': f'Create iSCSI target {target_name}', - }]): - payload = { - 'name': target_name, - 'alias': target_alias1, - } - if api == 'ws': - target_config = call('iscsi.target.create', payload) - elif api == 'rest': - result = POST('/iscsi/target/', payload) - assert result.status_code == 200, result.text - target_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.target.update', - 'params': [ - target_config['id'], - { - 'alias': target_alias2, - }], - 'description': f'Update iSCSI target {target_name}', - }]): - payload = { - 'alias': target_alias2, - } - if api == 'ws': - target_config = call('iscsi.target.update', target_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/target/id/{target_config["id"]}', payload) - assert result.status_code == 200, result.text - target_config = result.json() - else: - raise ValueError(api) - finally: - if target_config is not None: - # DELETE - id_ = target_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.target.delete', - 'params': [id_, True], - 'description': f'Delete iSCSI target {target_name}', - }]): - if api == 'ws': - call('iscsi.target.delete', id_, True) - elif api == 'rest': - result = DELETE(f'/iscsi/target/id/{id_}', True) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_iscsi_targetextent_audit(api, initialize_zvol_for_iscsi_audit_tests): - - payload = { - 'type': 'DISK', - 'disk': f'zvol/{initialize_zvol_for_iscsi_audit_tests}', - 'name': 'extent1', - } - with iscsi_extent(payload) as extent_config: - with iscsi_target({'name': 'target1', 
'alias': 'Audit test'}) as target_config: - targetextent_config = None - try: - # CREATE - with expect_audit_method_calls([{ - 'method': 'iscsi.targetextent.create', - 'params': [ - { - 'target': target_config['id'], - 'extent': extent_config['id'], - 'lunid': 0, - } - ], - 'description': 'Create iSCSI target/LUN/extent mapping target1/0/extent1', - }]): - payload = { - 'target': target_config['id'], - 'extent': extent_config['id'], - 'lunid': 0, - } - if api == 'ws': - targetextent_config = call('iscsi.targetextent.create', payload) - elif api == 'rest': - result = POST('/iscsi/targetextent/', payload) - assert result.status_code == 200, result.text - targetextent_config = result.json() - else: - raise ValueError(api) - # UPDATE - with expect_audit_method_calls([{ - 'method': 'iscsi.targetextent.update', - 'params': [ - targetextent_config['id'], - { - 'lunid': 1, - }], - 'description': 'Update iSCSI target/LUN/extent mapping target1/0/extent1', - }]): - payload = { - 'lunid': 1, - } - if api == 'ws': - targetextent_config = call('iscsi.targetextent.update', targetextent_config['id'], payload) - elif api == 'rest': - result = PUT(f'/iscsi/targetextent/id/{targetextent_config["id"]}', payload) - assert result.status_code == 200, result.text - targetextent_config = result.json() - else: - raise ValueError(api) - finally: - if targetextent_config is not None: - # DELETE - id_ = targetextent_config['id'] - with expect_audit_method_calls([{ - 'method': 'iscsi.targetextent.delete', - 'params': [id_, True], - 'description': 'Delete iSCSI target/LUN/extent mapping target1/1/extent1', - }]): - if api == 'ws': - call('iscsi.targetextent.delete', id_, True) - elif api == 'rest': - result = DELETE(f'/iscsi/targetextent/id/{id_}', True) - assert result.status_code == 200, result.text - else: - raise ValueError(api) diff --git a/tests/api2/test_audit_nfs.py b/tests/api2/test_audit_nfs.py deleted file mode 100644 index 015ef133fa107..0000000000000 --- a/tests/api2/test_audit_nfs.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import sys - -import pytest -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -sys.path.append(os.getcwd()) -from functions import DELETE, POST, PUT - -REDACTED_SECRET = '********' - - -@pytest.fixture(scope='module') -def nfs_audit_dataset(request): - with dataset('audit-test-nfs') as ds: - try: - yield ds - finally: - pass - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_nfs_config_audit(api): - ''' - Test the auditing of NFS configuration changes - ''' - bogus_user = 'bogus_user' - bogus_password = 'boguspassword123' - initial_nfs_config = call('nfs.config') - try: - # UPDATE - payload = { - 'mountd_log': not initial_nfs_config['mountd_log'], - 'mountd_port': 618, - 'protocols': ["NFSV4"] - } - with expect_audit_method_calls([{ - 'method': 'nfs.update', - 'params': [payload], - 'description': 'Update NFS configuration', - }]): - if api == 'ws': - call('nfs.update', payload) - elif api == 'rest': - result = PUT('/nfs/', payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - finally: - # Restore initial state - restore_payload = { - 'mountd_log': initial_nfs_config['mountd_log'], - 'mountd_port': initial_nfs_config['mountd_port'], - 'protocols': initial_nfs_config['protocols'] - } - if api == 'ws': - call('nfs.update', restore_payload) 
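- # Editor's note: every audit test in these modules follows the same
- # CREATE/UPDATE/DELETE shape, so the pattern is worth spelling out once.
- # A minimal sketch, assuming only what the imports above already provide
- # (expect_audit_method_calls() is a context manager that fails the test
- # unless each listed call shows up in the audit trail, and call() drives
- # the middleware directly):
- #
- #     payload = {'mountd_log': True}
- #     with expect_audit_method_calls([{
- #         'method': 'nfs.update',          # middleware method being audited
- #         'params': [payload],             # params as they should be logged
- #         'description': 'Update NFS configuration',
- #     }]):
- #         call('nfs.update', payload)      # the call that must be audited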
- elif api == 'rest': - result = PUT('/nfs/', restore_payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) - - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_nfs_share_audit(api, nfs_audit_dataset): - ''' - Test the auditing of NFS share operations - ''' - nfs_export_path = f"/mnt/{nfs_audit_dataset}" - share_config = None - try: - # CREATE - payload = { - "comment": "My Test Share", - "path": nfs_export_path, - "security": ["SYS"] - } - with expect_audit_method_calls([{ - 'method': 'sharing.nfs.create', - 'params': [payload], - 'description': f'NFS share create {nfs_export_path}', - }]): - if api == 'ws': - share_config = call('sharing.nfs.create', payload) - elif api == 'rest': - results = POST("/sharing/nfs/", payload) - assert results.status_code == 200, results.text - share_config = results.json() - else: - raise ValueError(api) - # UPDATE - payload = { - "security": [] - } - with expect_audit_method_calls([{ - 'method': 'sharing.nfs.update', - 'params': [ - share_config['id'], - payload, - ], - 'description': f'NFS share update {nfs_export_path}', - }]): - if api == 'ws': - share_config = call('sharing.nfs.update', share_config['id'], payload) - elif api == 'rest': - results = PUT(f"/sharing/nfs/id/{share_config['id']}/", payload) - assert results.status_code == 200, results.text - share_config = results.json() - else: - raise ValueError(api) - finally: - if share_config is not None: - # DELETE - id_ = share_config['id'] - with expect_audit_method_calls([{ - 'method': 'sharing.nfs.delete', - 'params': [id_], - 'description': f'NFS share delete {nfs_export_path}', - }]): - if api == 'ws': - call('sharing.nfs.delete', id_) - elif api == 'rest': - result = DELETE(f'/sharing/nfs/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) diff --git a/tests/api2/test_audit_permission.py b/tests/api2/test_audit_permission.py deleted file mode 100644 index b97c2079e190b..0000000000000 --- a/tests/api2/test_audit_permission.py +++ /dev/null @@ -1,55 +0,0 @@ -import os - -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -JENNY = 8675309 - - -def test_audit_chown(): - with dataset('audit_chown') as ds: - path = os.path.join('/mnt', ds) - payload = {'path': path, 'uid': JENNY} - - with expect_audit_method_calls([{ - 'method': 'filesystem.chown', - 'params': [payload], - 'description': f'Filesystem change owner {path}' - }]): - call('filesystem.chown', payload, job=True) - - -def test_audit_setperm(): - with dataset('audit_setperm') as ds: - path = os.path.join('/mnt', ds) - payload = {'path': path, 'mode': '777'} - - with expect_audit_method_calls([{ - 'method': 'filesystem.setperm', - 'params': [payload], - 'description': f'Filesystem set permission {path}' - }]): - call('filesystem.setperm', payload, job=True) - - -def test_audit_setacl(): - with dataset('audit_setacl', {'share_type': 'SMB'}) as ds: - path = os.path.join('/mnt', ds) - the_acl = call('filesystem.getacl', path)['acl'] - the_acl.append({ - 'tag': 'USER', - 'id': JENNY, - 'perms': {'BASIC': 'FULL_CONTROL'}, - 'flags': {'BASIC': 'INHERIT'}, - 'type': 'ALLOW' - }) - - payload = {'path': path, 'dacl': the_acl} - - with expect_audit_method_calls([{ - 'method': 'filesystem.setacl', - 'params': [payload], - 'description': f'Filesystem set ACL {path}' - }]): - call('filesystem.setacl', payload, job=True) diff --git
a/tests/api2/test_audit_rest.py b/tests/api2/test_audit_rest.py deleted file mode 100644 index 441de65b03dda..0000000000000 --- a/tests/api2/test_audit_rest.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding=utf-8 -*- -import io -import json -import os -import sys -from unittest.mock import ANY - -import requests - -from middlewared.test.integration.assets.account import unprivileged_user -from middlewared.test.integration.utils import call, url -from middlewared.test.integration.utils.audit import expect_audit_log - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import POST - - -def test_unauthenticated_call(): - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "invalid"}, - }, - "error": "Bad username or password", - }, - "success": False, - } - ], include_logins=True): - r = requests.get(f"{url()}/api/v2.0/system/info", auth=("invalid", "password")) - assert r.status_code == 401 - - -def test_unauthenticated_upload_call(): - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "invalid"}, - }, - "error": "Bad username or password", - }, - "success": False, - } - ], include_logins=True): - r = requests.post( - f"{url()}/api/v2.0/resttest/test_input_pipe", - auth=("invalid", "password"), - files={ - "data": (None, io.StringIO('{"key": "value"}')), - "file": (None, io.StringIO("FILE")), - }, - ) - assert r.status_code == 401 - - -def test_authenticated_call(): - user_id = None - try: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "REST", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - "error": None, - }, - "success": True, - }, - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "REST", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "user.create", - "params": [ - { - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "********", - } - ], - "description": "Create user sergey", - }, - "success": True, - }, - ], include_logins=True): - r = POST("/user", { - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "password", - }) - assert r.status_code == 200 - user_id = r.json() - finally: - if user_id is not None: - call("user.delete", user_id) - - -def test_unauthorized_call(): - with unprivileged_user( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=[], - roles=[], - web_shell=False, - ) as u: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "REST", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": ANY}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": False, - "method": "user.create", - "params": [{"username": 
"sergey", "full_name": "Sergey"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - r = requests.post( - f"{url()}/api/v2.0/user", - auth=(u.username, u.password), - headers={"Content-type": "application/json"}, - data=json.dumps({"username": "sergey", "full_name": "Sergey"}), - ) - assert r.status_code == 403, r.text - - -def test_bogus_call(): - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "REST", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "user.create", - "params": [{}], - "description": "Create user", - }, - "success": False, - } - ]): - response = POST("/user", {}) - assert response.status_code == 422 diff --git a/tests/api2/test_audit_smb.py b/tests/api2/test_audit_smb.py deleted file mode 100644 index e12ba132a6953..0000000000000 --- a/tests/api2/test_audit_smb.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import sys - -import pytest -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.audit import expect_audit_method_calls - -sys.path.append(os.getcwd()) - -REDACTED_SECRET = '********' - - -@pytest.fixture(scope='module') -def smb_audit_dataset(request): - with dataset('audit-test-smb') as ds: - try: - yield ds - finally: - pass - - -def test_smb_update_audit(): - ''' - Test the auditing of SMB configuration changes - ''' - initial_smb_config = call('smb.config') - payload = {'enable_smb1': True} - try: - with expect_audit_method_calls([{ - 'method': 'smb.update', - 'params': [payload], - 'description': 'Update SMB configuration', - }]): - call('smb.update', payload) - finally: - call('smb.update', {'enable_smb1': False}) - - -def test_smb_share_audit(smb_audit_dataset): - ''' - Test the auditing of SMB share operations - ''' - smb_share_path = os.path.join('/mnt', smb_audit_dataset) - try: - # CREATE - payload = { - "comment": "My Test Share", - "path": smb_share_path, - "name": "audit_share" - } - with expect_audit_method_calls([{ - 'method': 'sharing.smb.create', - 'params': [payload], - 'description': f'SMB share create audit_share', - }]): - share_config = call('sharing.smb.create', payload) - - # UPDATE - payload = { - "ro": True - } - with expect_audit_method_calls([{ - 'method': 'sharing.smb.update', - 'params': [ - share_config['id'], - payload, - ], - 'description': f'SMB share update audit_share', - }]): - share_config = call('sharing.smb.update', share_config['id'], payload) - - finally: - if share_config is not None: - # DELETE - share_id = share_config['id'] - with expect_audit_method_calls([{ - 'method': 'sharing.smb.delete', - 'params': [share_id], - 'description': f'SMB share delete audit_share', - }]): - call('sharing.smb.delete', share_id) diff --git a/tests/api2/test_audit_sudo.py b/tests/api2/test_audit_sudo.py deleted file mode 100644 index 07871e7917688..0000000000000 --- a/tests/api2/test_audit_sudo.py +++ /dev/null @@ -1,254 +0,0 @@ -import contextlib -import secrets -import string -import time - -import pytest -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.utils import call, ssh - -EVENT_KEYS = {'timestamp', 'message_timestamp', 'service_data', 'username', 'service', 'audit_id', 
'address', 'event_data', 'event', 'session', 'success'} -ACCEPT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'} -REJECT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'reason', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'} - -LS_COMMAND = '/bin/ls' -ECHO_COMMAND = '/bin/echo' - -SUDO_TO_USER = 'sudo-to-user' -SUDO_TO_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - -def user_sudo_events(username, count=False): - payload = { - 'services': ['SUDO'], - 'query-filters': [['username', '=', username]], - } - if count: - payload['query-options'] = {'count': True} - return call('audit.query', payload) - - -def wait_for_events(username, newcount, retries=20, delay=0.5): - assert retries > 0 and retries <= 20 - assert delay >= 0.1 and delay <= 1 - while newcount != user_sudo_events(username, True) and retries: - time.sleep(delay) - retries -= 1 - return user_sudo_events(username, True) - - -def assert_accept(event): - assert type(event) is dict - assert set(event.keys()) == EVENT_KEYS - assert set(event['event_data'].keys()) == {'sudo'} - assert set(event['event_data']['sudo'].keys()) == {'accept'} - assert set(event['event_data']['sudo']['accept'].keys()) == ACCEPT_KEYS - return event['event_data']['sudo']['accept'] - - -def assert_reject(event): - assert type(event) is dict - assert set(event.keys()) == EVENT_KEYS - assert set(event['event_data'].keys()) == {'sudo'} - assert set(event['event_data']['sudo'].keys()) == {'reject'} - assert set(event['event_data']['sudo']['reject'].keys()) == REJECT_KEYS - return event['event_data']['sudo']['reject'] - - -@contextlib.contextmanager -def initialize_for_sudo_tests(username, password, data): - data.update({ - 'username': username, - 'full_name': username, - 'group_create': True, - 'password': password, - 'shell': '/usr/bin/bash', - 'ssh_password_enabled': True, - }) - with user(data) as newuser: - yield newuser - - -@pytest.fixture(scope='module') -def sudo_to_user(): - with initialize_for_sudo_tests(SUDO_TO_USER, SUDO_TO_PASSWORD, {}) as u: - yield u - - -class SudoTests: - - def generate_command(self, cmd, runuser=None, password=None): - command = ['sudo'] - if password: - command.append('-S') - if runuser: - command.extend(['-u', runuser]) - command.append(cmd) - return " ".join(command) - - def allowed_all(self): - """All of the sudo commands are allowed""" - # First get a baseline # of events - count = user_sudo_events(self.USER, True) - - # Now create an event and do some basic checking - self.sudo_command('ls /etc') - assert count + 1 == wait_for_events(self.USER, count + 1) - accept = assert_accept(user_sudo_events(self.USER)[-1]) - assert accept['submituser'] == self.USER - assert accept['command'] == LS_COMMAND - assert accept['runuser'] == 'root' - assert accept['runargv'].split(',') == ['ls', '/etc'] - - # One more completely unique command - magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20)) - self.sudo_command(f'echo {magic}') - assert count + 2 == wait_for_events(self.USER, count + 2) - accept = assert_accept(user_sudo_events(self.USER)[-1]) - assert accept['submituser'] == self.USER - assert accept['command'] == ECHO_COMMAND - assert accept['runuser'] == 'root' - assert accept['runargv'].split(',') == ['echo', magic] - - # sudo to a non-root user - self.sudo_command('ls /tmp', SUDO_TO_USER) -
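- # Editor's note: the audit rows for sudo arrive asynchronously, which is
- # why wait_for_events() polls instead of sleeping a fixed interval.  The
- # same count can be checked by hand with the midclt CLI used elsewhere in
- # this test suite (the username below is just an example value):
- #
- #     midclt call audit.query '{"services": ["SUDO"],
- #       "query-filters": [["username", "=", "sudo-to-user"]],
- #       "query-options": {"count": true}}'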
assert count + 3 == wait_for_events(self.USER, count + 3) - accept = assert_accept(user_sudo_events(self.USER)[-1]) - assert accept['submituser'] == self.USER - assert accept['command'] == LS_COMMAND - assert accept['runuser'] == SUDO_TO_USER - assert accept['runargv'].split(',') == ['ls', '/tmp'] - - def allowed_some(self): - """Some of the sudo commands are allowed""" - # First get a baseline # of events - count = user_sudo_events(self.USER, True) - - # Generate a sudo command that we ARE allowed to perform - magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20)) - self.sudo_command(f'echo {magic}') - assert count + 1 == wait_for_events(self.USER, count + 1) - accept = assert_accept(user_sudo_events(self.USER)[-1]) - assert accept['submituser'] == self.USER - assert accept['command'] == ECHO_COMMAND - assert accept['runuser'] == 'root' - assert accept['runargv'].split(',') == ['echo', magic] - - # Generate a sudo command that we are NOT allowed to perform - with pytest.raises(AssertionError): - self.sudo_command('ls /etc') - # Returned exception depends upon whether passwd or nopasswd - assert count + 2 == wait_for_events(self.USER, count + 2) - reject = assert_reject(user_sudo_events(self.USER)[-1]) - assert reject['submituser'] == self.USER - assert reject['command'] == LS_COMMAND - assert reject['runuser'] == 'root' - assert reject['runargv'].split(',') == ['ls', '/etc'] - assert reject['reason'] == 'command not allowed' - - def allowed_none(self):  - """None of the sudo commands are allowed""" - # First get a baseline # of events - count = user_sudo_events(self.USER, True) - - # Now create an event and do some basic checking to ensure it failed - with pytest.raises(AssertionError) as ve: - self.sudo_command('ls /etc') - assert 'is not allowed to execute ' in str(ve), str(ve) - assert count + 1 == wait_for_events(self.USER, count + 1) - reject = assert_reject(user_sudo_events(self.USER)[-1]) - assert reject['submituser'] == self.USER - assert reject['command'] == LS_COMMAND - assert reject['runuser'] == 'root' - assert reject['runargv'].split(',') == ['ls', '/etc'] - assert reject['reason'] == 'command not allowed' - - -class SudoNoPasswd: - def sudo_command(self, cmd, runuser=None): - command = self.generate_command(cmd, runuser) - ssh(command, user=self.USER, password=self.PASSWORD) - - -class SudoPasswd: - def sudo_command(self, cmd, runuser=None): - command = f'echo {self.PASSWORD} | {self.generate_command(cmd, runuser, self.PASSWORD)}' - ssh(command, user=self.USER, password=self.PASSWORD) - - -class TestSudoAllowedAllNoPasswd(SudoTests, SudoNoPasswd): - - USER = 'sudo-allowed-all-nopw-user' - PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - @pytest.fixture(scope='class') - def create_user(self): - with initialize_for_sudo_tests(self.USER, - self.PASSWORD, - {'sudo_commands_nopasswd': ['ALL']}) as u: - yield u - - def test_audit_query(self, sudo_to_user, create_user): - self.allowed_all() - - -class TestSudoAllowedAllPasswd(SudoTests, SudoPasswd): - - USER = 'sudo-allowed-all-pw-user' - PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - @pytest.fixture(scope='class') - def create_user(self): - with initialize_for_sudo_tests(self.USER, - self.PASSWORD, - {'sudo_commands': ['ALL']}) as u: - yield u - - def test_audit_query(self, sudo_to_user, create_user): - self.allowed_all() - - -class TestSudoAllowedNonePasswd(SudoTests, SudoPasswd): - - USER =
'sudo-allowed-none-pw-user' - PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - @pytest.fixture(scope='class') - def create_user(self): - with initialize_for_sudo_tests(self.USER, self.PASSWORD, {}) as u: - yield u - - def test_audit_query(self, create_user): - self.allowed_none() - - -class TestSudoAllowedSomeNoPasswd(SudoTests, SudoNoPasswd): - - USER = 'sudo-allowed-some-nopw-user' - PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - @pytest.fixture(scope='class') - def create_user(self): - with initialize_for_sudo_tests(self.USER, - self.PASSWORD, - {'sudo_commands_nopasswd': [ECHO_COMMAND]}) as u: - yield u - - def test_audit_query(self, create_user): - self.allowed_some() - - -class TestSudoAllowedSomePasswd(SudoTests, SudoPasswd): - - USER = 'sudo-allowed-some-pw-user' - PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - @pytest.fixture(scope='class') - def create_user(self): - with initialize_for_sudo_tests(self.USER, - self.PASSWORD, - {'sudo_commands': [ECHO_COMMAND]}) as u: - yield u - - def test_audit_query(self, create_user): - self.allowed_some() diff --git a/tests/api2/test_audit_websocket.py b/tests/api2/test_audit_websocket.py deleted file mode 100644 index 93ef6ec9ac8af..0000000000000 --- a/tests/api2/test_audit_websocket.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- coding=utf-8 -*- -from unittest.mock import ANY - -import pytest - -from truenas_api_client import ClientException -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import unprivileged_user_client, user -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.utils import call, client, ssh -from middlewared.test.integration.utils.audit import expect_audit_log - - -def test_unauthenticated_call(): - with client(auth=None) as c: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": None, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": False, - "authorized": False, - "method": "user.create", - "params": [{"username": "sergey", "full_name": "Sergey"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - with pytest.raises(ClientException): - c.call("user.create", {"username": "sergey", "full_name": "Sergey"}) - - -def test_unauthorized_call(): - with unprivileged_user_client() as c: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": ANY}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": False, - "method": "user.create", - "params": [{"username": "sergey", "full_name": "Sergey"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - with pytest.raises(ClientException): - c.call("user.create", {"username": "sergey", "full_name": "Sergey"}) - - -def test_bogus_call(): - with client() as c: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - 
"authorized": True, - "method": "user.create", - "params": [{}], - "description": "Create user", - }, - "success": False, - } - ]): - with pytest.raises(ValidationErrors): - c.call("user.create", {}) - - -def test_invalid_call(): - with client() as c: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "user.create", - "params": [{"username": "sergey", "password": "********"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - with pytest.raises(ValidationErrors): - c.call("user.create", {"username": "sergey", "password": "password"}) - - -def test_typo_in_secret_credential_name(): - with client() as c: - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "user.create", - "params": [{"username": "sergey"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - with pytest.raises(ValidationErrors): - c.call("user.create", {"username": "sergey", "passwrod": "password"}) - - -def test_valid_call(): - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "user.create", - "params": [ - { - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "********", - "home_create": True, - } - ], - "description": "Create user sergey", - }, - "success": True, - }, - {}, - ]): - with user({ - "username": "sergey", - "full_name": "Sergey", - "group_create": True, - "home": "/nonexistent", - "password": "password", - }): - pass - - -def test_password_login(): - with expect_audit_log([ - { - "service_data": { - "vers": { - "major": 0, - "minor": 1, - }, - "origin": ANY, - "protocol": "WEBSOCKET", - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - }, - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "root"}, - }, - "error": None, - }, - "success": True, - } - ], include_logins=True): - with client(): - pass - - -def test_password_login_failed(): - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "LOGIN_PASSWORD", - "credentials_data": {"username": "invalid"}, - }, - "error": "Bad username or password", - }, - "success": False, - } - ], include_logins=True): - with client(auth=("invalid", ""), auth_required=False): - pass - - -def test_token_login(): - token = call("auth.generate_token", 300, {}, True) - - with client(auth=None) as c: - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "TOKEN", - "credentials_data": { - "parent": { - "credentials": "LOGIN_PASSWORD", - 
"credentials_data": {"username": "root"}, - }, - "username": "root", - }, - }, - "error": None, - }, - "success": True, - } - ], include_logins=True): - assert c.call("auth.login_with_token", token) - - -def test_token_login_failed(): - with client(auth=None) as c: - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "TOKEN", - "credentials_data": { - "token": "invalid_token", - }, - }, - "error": "Invalid token", - }, - "success": False, - } - ], include_logins=True): - c.call("auth.login_with_token", "invalid_token") - - -def test_token_attributes_login_failed(): - token = call("auth.generate_token", 300, {"filename": "debug.txz", "job": 1020}, True) - - with client(auth=None) as c: - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "TOKEN", - "credentials_data": { - "token": token, - }, - }, - "error": "Bad token", - }, - "success": False, - } - ], include_logins=True): - c.call("auth.login_with_token", token) - - -def test_api_key_login(): - with api_key([]) as key: - with client(auth=None) as c: - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "API_KEY", - "credentials_data": { - "api_key": {"id": ANY, "name": ANY}, - }, - }, - "error": None, - }, - "success": True, - } - ], include_logins=True): - assert c.call("auth.login_with_api_key", key) - - -def test_api_key_login_failed(): - with client(auth=None) as c: - with expect_audit_log([ - { - "event": "AUTHENTICATION", - "event_data": { - "credentials": { - "credentials": "API_KEY", - "credentials_data": { - "api_key": "invalid_api_key", - }, - }, - "error": "Invalid API key", - }, - "success": False, - } - ], include_logins=True): - c.call("auth.login_with_api_key", "invalid_api_key") - - -@pytest.mark.parametrize('logfile', ('/var/log/messages', '/var/log/syslog')) -def test_check_syslog_leak(logfile): - entries = ssh(f'grep @cee {logfile}', check=False) - assert '@cee' not in entries diff --git a/tests/api2/test_auth_me.py b/tests/api2/test_auth_me.py deleted file mode 100644 index e26070e7aeb3c..0000000000000 --- a/tests/api2/test_auth_me.py +++ /dev/null @@ -1,106 +0,0 @@ -import pytest - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.utils import call, client - - -def test_works(): - user = call("auth.me") - - assert user["pw_uid"] == 0 - assert user["pw_name"] == "root" - assert user['two_factor_config'] is not None - assert user['privilege']['webui_access'] - - -def test_works_for_token(): - token = call("auth.generate_token", 300) - - with client(auth=None) as c: - assert c.call("auth.login_with_token", token) - - user = c.call("auth.me") - - assert user["pw_uid"] == 0 - assert user["pw_name"] == "root" - assert user['two_factor_config'] is not None - assert 'SYS_ADMIN' in user['account_attributes'] - assert 'LOCAL' in user['account_attributes'] - - -def test_does_not_work_for_api_key(): - with api_key([{"method": "CALL", "resource": "auth.me"}]) as key: - with client(auth=None) as c: - assert c.call("auth.login_with_api_key", key) - - with pytest.raises(CallError) as ve: - c.call("auth.me") - - assert ve.value.errmsg == "You are logged in using API_KEY" - - -def test_attributes(): - user = 
call("auth.me") - assert "test" not in user["attributes"] - - call("auth.set_attribute", "test", "value") - - user = call("auth.me") - assert user["attributes"]["test"] == "value" - - call("auth.set_attribute", "test", "new_value") - - user = call("auth.me") - assert user["attributes"]["test"] == "new_value" - - -def test_distinguishes_attributes(): - builtin_administrators_group_id = call( - "datastore.query", - "account.bsdgroups", - [["group", "=", "builtin_administrators"]], - {"get": True, "prefix": "bsdgrp_"}, - )["id"] - - with user({ - "username": "admin", - "full_name": "Admin", - "group_create": True, - "groups": [builtin_administrators_group_id], - "home": f"/nonexistent", - "password": "test1234", - }) as admin: - with client(auth=("admin", "test1234")) as c: - me = c.call("auth.me") - assert "test" not in me["attributes"] - - c.call("auth.set_attribute", "test", "value") - - me = c.call("auth.me") - assert me["attributes"]["test"] == "value" - - c.call("auth.set_attribute", "test", "new_value") - - me = c.call("auth.me") - assert me["attributes"]["test"] == "new_value" - assert me['two_factor_config'] is not None - assert 'SYS_ADMIN' not in me['account_attributes'] - assert 'LOCAL' in me['account_attributes'] - assert me['privilege']['webui_access'] - - assert not call("datastore.query", "account.bsdusers_webui_attribute", [["uid", "=", admin["uid"]]]) - - -@pytest.mark.parametrize("role,expected", [ - (["READONLY_ADMIN", "FILESYSTEM_ATTRS_WRITE"], True), - (["READONLY_ADMIN"], True), - (["SHARING_ADMIN"], True), - (["FILESYSTEM_ATTRS_WRITE"], False) -]) -def test_webui_access(role, expected): - with unprivileged_user_client(roles=role) as c: - me = c.call('auth.me') - assert me['privilege']['webui_access'] == expected diff --git a/tests/api2/test_auth_otp.py b/tests/api2/test_auth_otp.py deleted file mode 100644 index bad920b92863a..0000000000000 --- a/tests/api2/test_auth_otp.py +++ /dev/null @@ -1,43 +0,0 @@ -import io -import json - -import pytest - -from middlewared.test.integration.utils import call, session, ssh, url - - -@pytest.fixture(scope="module") -def otp_enabled(): - call("auth.twofactor.update", {"enabled": True}) - - try: - yield - finally: - ssh("midclt call auth.twofactor.update '{\"enabled\": false}'") - - -def test_otp_http_basic_auth(otp_enabled): - with session() as s: - r = s.put(f"{url()}/api/v2.0/auth/twofactor/", data=json.dumps({"enabled": False})) - assert r.status_code == 401 - assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled" - - -def test_otp_http_basic_auth_upload(otp_enabled): - with session() as s: - r = s.post( - f"{url()}/_upload/", - data={ - "data": json.dumps({ - "method": "filesystem.put", - "params": ["/tmp/upload"], - }) - }, - files={ - "file": io.BytesIO(b"test"), - }, - ) - assert r.status_code == 401 - assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled" - - diff --git a/tests/api2/test_auth_token.py b/tests/api2/test_auth_token.py deleted file mode 100644 index 2fec805bb223c..0000000000000 --- a/tests/api2/test_auth_token.py +++ /dev/null @@ -1,115 +0,0 @@ -import io -import json - -import pytest -import requests - -import os -import sys -sys.path.append(os.getcwd()) -from functions import GET - -from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template -from middlewared.test.integration.utils import call, client, ssh -from middlewared.test.integration.utils.client import truenas_server -from middlewared.test.integration.utils.shell import 
assert_shell_works - - -@pytest.fixture(scope="module") -def download_token(): - return call("auth.generate_token", 300, {"filename": "debug.txz", "job": 1020}, True) - - -def test_download_auth_token_cannot_be_used_for_restful_api_call(download_token): - results = GET("/user/id/1/", anonymous=True, headers={"Authorization": f"Token {download_token}"}) - assert results.status_code == 403, results.text - - -def test_download_auth_token_cannot_be_used_for_upload(download_token): - r = requests.post( - f"http://{truenas_server.ip}/_upload", - headers={"Authorization": f"Token {download_token}"}, - data={ - "data": json.dumps({ - "method": "filesystem.put", - "params": ["/tmp/upload"], - }) - }, - files={ - "file": io.BytesIO(b"test"), - }, - timeout=10 - ) - assert r.status_code == 403 - - -def test_download_auth_token_cannot_be_used_for_websocket_auth(download_token): - with client(auth=None) as c: - assert not c.call("auth.login_with_token", download_token) - - -@pytest.mark.timeout(30) -def test_token_created_by_token_can_use_shell(): - with client() as c: - token = c.call("auth.generate_token", 300, {}, True) - - with client(auth=None) as c2: - assert c2.call("auth.login_with_token", token) - - token2 = c2.call("auth.generate_token", 300, {}, True) - assert_shell_works(token2, "root") - - -@pytest.fixture(scope="module") -def unprivileged_user(): - with unprivileged_user_template( - username="test", - group_name="test", - privilege_name="test", - allowlist=[{"method": "CALL", "resource": "system.info"}], - web_shell=True, - ): - yield - - -def test_login_with_token_match_origin(unprivileged_user): - token = ssh( - "sudo -u test midclt -u ws://localhost/websocket -U test -P test1234 call auth.generate_token 300 '{}' true" - ).strip() - - with client(auth=None) as c: - assert not c.call("auth.login_with_token", token) - - -def test_login_with_token_no_match_origin(unprivileged_user): - token = ssh( - "sudo -u test midclt -u ws://localhost/websocket -U test -P test1234 call auth.generate_token 300" - ).strip() - - with client(auth=None) as c: - assert c.call("auth.login_with_token", token) - - -def test_token_is_for_one_time_use(): - token = call("auth.generate_token", 300) - - with client(auth=None) as c: - assert c.call("auth.login_with_token", token) - - with client(auth=None) as c: - assert not c.call("auth.login_with_token", token) - - -def test_kill_all_tokens_on_session_termination(): - token = call("auth.generate_token", 300) - - with client(auth=None) as c: - assert c.call("auth.login_with_token", token) - - token = c.call("auth.generate_token") - - session = c.call("auth.sessions", [["current", "=", True]], {"get": True}) - call("auth.terminate_session", session["id"]) - - with client(auth=None) as c: - assert not c.call("auth.login_with_token", token) diff --git a/tests/api2/test_block_hooks.py b/tests/api2/test_block_hooks.py deleted file mode 100644 index eeb0770190a51..0000000000000 --- a/tests/api2/test_block_hooks.py +++ /dev/null @@ -1,31 +0,0 @@ -import uuid - -import pytest - -from middlewared.test.integration.utils import client, mock - - - -@pytest.mark.parametrize("block", [True, False]) -def test_block_hooks(block): - hook_name = str(uuid.uuid4()) - - with mock("test.test1", """ - async def mock(self, hook_name, blocked_hooks): - from pathlib import Path - - sentinel = Path("/tmp/block_hooks_sentinel") - - async def hook(middleware): - sentinel.write_text("") - - self.middleware.register_hook(hook_name, hook, blockable=True, sync=True) - - 
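- # Editor's note: whether the sentinel file exists after call_hook()
- # is what tells the test if block_hooks() suppressed the hook.  In
- # sketch form, using the same API as the surrounding code:
- #
- #     with self.middleware.block_hooks(hook_name):    # hook suppressed
- #         await self.middleware.call_hook(hook_name)  # hook does not run
- #     # sentinel absent -> the blocked hook never fired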
sentinel.unlink(missing_ok=True) - with self.middleware.block_hooks(*blocked_hooks): - await self.middleware.call_hook(hook_name) - - return sentinel.exists() - """): - with client() as c: - assert c.call("test.test1", hook_name, [hook_name] if block else []) == (not block) diff --git a/tests/api2/test_boot_attach_replace_detach.py b/tests/api2/test_boot_attach_replace_detach.py deleted file mode 100644 index 6d9245c84f06d..0000000000000 --- a/tests/api2/test_boot_attach_replace_detach.py +++ /dev/null @@ -1,57 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import call -from auto_config import ha - -if not ha: - # the HA VMs only have 1 extra disk at time - # of writing this. QE is aware and is working - # on adding more disks to them so in the meantime - # we have to skip this test since it will fail - # 100% of the time on HA VMs. - - @pytest.mark.timeout(600) - def test_boot_attach_replace_detach(): - existing_disks = call("boot.get_disks") - assert len(existing_disks) == 1 - - unused = call("disk.get_unused") - to_attach = unused[0]["name"] - replace_with = unused[1]["name"] - - # Attach a disk and wait for resilver to finish - call("boot.attach", to_attach, job=True) - while True: - state = call("boot.get_state") - if not ( - state["scan"] and - state["scan"]["function"] == "RESILVER" and - state["scan"]["state"] == "SCANNING" - ): - break - - assert state["topology"]["data"][0]["type"] == "MIRROR" - - assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE" - - to_replace = state["topology"]["data"][0]["children"][1]["name"] - assert to_replace.startswith(to_attach) - assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE" - - # Replace newly attached disk - call("boot.replace", to_replace, replace_with, job=True) - # Resilver is a part of replace routine - state = call("boot.get_state") - - assert state["topology"]["data"][0]["type"] == "MIRROR" - - assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE" - - to_detach = state["topology"]["data"][0]["children"][1]["name"] - assert to_detach.startswith(replace_with) - assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE" - - # Detach replaced disk, returning the pool to its initial state - call("boot.detach", to_detach) - - assert len(call("boot.get_disks")) == 1 diff --git a/tests/api2/test_boot_format.py b/tests/api2/test_boot_format.py deleted file mode 100644 index 8dbb0177ba89a..0000000000000 --- a/tests/api2/test_boot_format.py +++ /dev/null @@ -1,13 +0,0 @@ -from middlewared.test.integration.utils import call - - -def test_optimal_disk_usage(): - disk = call('disk.get_unused')[0] - data_size = ( - disk['size'] - - 1 * 1024 * 1024 - # BIOS boot - 512 * 1024 * 1024 - # EFI - 73 * 512 # GPT + alignment - ) - # Will raise an exception if we fail to format the disk with given harsh restrictions - call('boot.format', disk['name'], {'size': data_size}) diff --git a/tests/api2/test_bootenv.py b/tests/api2/test_bootenv.py deleted file mode 100644 index 62dc0cb728098..0000000000000 --- a/tests/api2/test_bootenv.py +++ /dev/null @@ -1,24 +0,0 @@ -from middlewared.test.integration.utils import call, ssh - - -def test_promote_current_be_datasets(): - var_log = ssh("df | grep /var/log").split()[0] - - snapshot_name = "snap-1" - snapshot = f"{var_log}@{snapshot_name}" - ssh(f"zfs snapshot {snapshot}") - try: - clone = "boot-pool/ROOT/clone" - ssh(f"zfs clone {snapshot} {clone}") - try: - ssh(f"zfs promote {clone}") - - assert ssh(f"zfs get -H -o value 
origin {var_log}").strip() == f"{clone}@{snapshot_name}" - - call("bootenv.promote_current_be_datasets") - - assert ssh(f"zfs get -H -o value origin {var_log}").strip() == "-" - finally: - ssh(f"zfs destroy {clone}") - finally: - ssh(f"zfs destroy {snapshot}") diff --git a/tests/api2/test_can_access_as_user.py b/tests/api2/test_can_access_as_user.py deleted file mode 100644 index a457988266de2..0000000000000 --- a/tests/api2/test_can_access_as_user.py +++ /dev/null @@ -1,81 +0,0 @@ -import contextlib -import pytest - -from middlewared.test.integration.assets.pool import dataset, pool -from middlewared.test.integration.utils import call, ssh - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - - -@contextlib.contextmanager -def file(name, user, group, permissions): - with dataset('test_perms', pool=pool) as test_dataset: - path = os.path.join('/mnt', test_dataset, name) - with file_at_path(path, user, group, permissions): - yield path - - -@contextlib.contextmanager -def file_at_path(path, user, group, permissions): - ssh(f'install -o {user} -g {group} -m {permissions} /dev/null {path}') - try: - yield path - finally: - ssh(f'rm -f {path}') - - -@contextlib.contextmanager -def directory(name, user, group, permissions): - with dataset('test_perms', pool=pool) as test_dataset: - path = os.path.join('/mnt', test_dataset, name) - ssh(f'mkdir -p -m {permissions} {path}') - ssh(f'chown -R {user}:{group} {path}') - - try: - yield path - finally: - ssh(f'rm -rf {path}') - - -def test_non_authorized_user_access(): - with file('test', 'root', 'root', '700') as file_path: - for perm_check in ('read', 'write', 'execute'): - assert call('filesystem.can_access_as_user', 'nobody', file_path, {perm_check: True}) is False - - -def test_authorized_user_access(): - for user, group in (('apps', 'apps'), ('nobody', 'nogroup')): - with file('test', user, group, '700') as file_path: - for perm_check in ('read', 'write', 'execute'): - assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is True - - -def test_read_access(): - for user, group in (('apps', 'apps'), ('nobody', 'nogroup')): - with file('test', user, group, '400') as file_path: - for perm_check, value in (('read', True), ('write', False), ('execute', False)): - assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value - - -def test_write_access(): - for user, group in (('apps', 'apps'), ('nobody', 'nogroup')): - with file('test', user, group, '200') as file_path: - for perm_check, value in (('read', False), ('write', True), ('execute', False)): - assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value - - -def test_execute_access(): - for user, group in (('apps', 'apps'), ('nobody', 'nogroup')): - with file('test', user, group, '100') as file_path: - for perm_check, value in (('read', False), ('write', False), ('execute', True)): - assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value - - -def test_nested_perm_execute_check(): - with directory('test_dir', 'root', 'root', '700') as dir_path: - file_path = os.path.join(dir_path, 'testfile') - with file_at_path(file_path, 'root', 'root', '777'): - assert call('filesystem.can_access_as_user', 'apps', file_path, {'execute': True}) is False diff --git a/tests/api2/test_certificate_roles.py b/tests/api2/test_certificate_roles.py deleted file mode 100644 index 94144e1bb4d91..0000000000000 --- a/tests/api2/test_certificate_roles.py +++ /dev/null @@ -1,41 
+0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize('method, role, valid_role', ( - ('certificate.profiles', 'CERTIFICATE_READ', True), - ('certificateauthority.profiles', 'CERTIFICATE_AUTHORITY_READ', True), - ('certificate.profiles', 'CERTIFICATE_AUTHORITY_READ', False), - ('certificateauthority.profiles', 'CERTIFICATE_READ', False), -)) -def test_profiles_read_roles(unprivileged_user_fixture, method, role, valid_role): - common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=False) - - -@pytest.mark.parametrize('role, valid_role', ( - ('CERTIFICATE_AUTHORITY_WRITE', True), - ('CERTIFICATE_AUTHORITY_READ', False), -)) -def test_certificate_authority_create_role(unprivileged_user_fixture, role, valid_role): - common_checks(unprivileged_user_fixture, 'certificateauthority.create', role, valid_role, method_args=[{}]) - - -@pytest.mark.parametrize('role, valid_role', ( - ('CERTIFICATE_WRITE', True), - ('CERTIFICATE_READ', False), -)) -def test_certificate_create_role(unprivileged_user_fixture, role, valid_role): - common_checks(unprivileged_user_fixture, 'certificate.create', role, valid_role, method_args=[], method_kwargs={'job': True}) - - -@pytest.mark.parametrize('role, valid_role', ( - ('CERTIFICATE_AUTHORITY_WRITE', True), - ('CERTIFICATE_AUTHORITY_READ', False), -)) -def test_signing_csr_role(unprivileged_user_fixture, role, valid_role): - common_checks(unprivileged_user_fixture, 'certificateauthority.ca_sign_csr', role, valid_role, method_args=[{ - 'ca_id': 1, - 'csr_cert_id': 1, - 'name': 'test_csr_signing_role', - }]) diff --git a/tests/api2/test_certs.py b/tests/api2/test_certs.py deleted file mode 100644 index 9388b8ed3ef17..0000000000000 --- a/tests/api2/test_certs.py +++ /dev/null @@ -1,552 +0,0 @@ -import pytest - -from truenas_api_client import ValidationErrors -from middlewared.test.integration.assets.crypto import ( - certificate_signing_request, get_cert_params, intermediate_certificate_authority, root_certificate_authority -) -from middlewared.test.integration.utils import call - -import sys -import textwrap -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - - -# We would like to test the following cases -# Creating root CA -# Creating intermediate CA -# Importing CA -# Creating certificate from root/intermediate CAs -# Create CSR -# Signing CSR - -def test_creating_root_ca(): - root_ca = call('certificateauthority.create', { - **get_cert_params(), - 'name': 'test_root_ca', - 'create_type': 'CA_CREATE_INTERNAL', - }) - try: - assert root_ca['CA_type_internal'] is True, root_ca - finally: - call('certificateauthority.delete', root_ca['id']) - - -def test_root_ca_issuer_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - assert root_ca['issuer'] == 'self-signed', root_ca - - -def test_creating_intermediate_ca(): - with root_certificate_authority('root_ca_test') as root_ca: - intermediate_ca = call('certificateauthority.create', { - **get_cert_params(), - 'signedby': root_ca['id'], - 'name': 'test_intermediate_ca', - 'create_type': 'CA_CREATE_INTERMEDIATE', - }) - try: - assert intermediate_ca['CA_type_intermediate'] is True, intermediate_ca - finally: - call('certificateauthority.delete', intermediate_ca['id']) - - -def test_ca_intermediate_issuer_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - intermediate_ca = call('certificateauthority.create', { - **get_cert_params(), - 'signedby': 
root_ca['id'], - 'name': 'test_intermediate_ca', - 'create_type': 'CA_CREATE_INTERMEDIATE', - }) - root_ca = call('certificateauthority.get_instance', root_ca['id']) - try: - assert intermediate_ca['issuer'] == root_ca, intermediate_ca - finally: - call('certificateauthority.delete', intermediate_ca['id']) - - -def test_cert_chain_of_intermediate_ca_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - intermediate_ca = call('certificateauthority.create', { - **get_cert_params(), - 'signedby': root_ca['id'], - 'name': 'test_intermediate_ca', - 'create_type': 'CA_CREATE_INTERMEDIATE', - }) - try: - assert intermediate_ca['chain_list'] == [ - intermediate_ca['certificate'], root_ca['certificate'] - ], intermediate_ca - finally: - call('certificateauthority.delete', intermediate_ca['id']) - - -def test_importing_ca(): - with root_certificate_authority('root_ca_test') as root_ca: - imported_ca = call('certificateauthority.create', { - 'certificate': root_ca['certificate'], - 'privatekey': root_ca['privatekey'], - 'name': 'test_imported_ca', - 'create_type': 'CA_CREATE_IMPORTED', - }) - try: - assert imported_ca['CA_type_existing'] is True, imported_ca - finally: - call('certificateauthority.delete', imported_ca['id']) - - -def test_ca_imported_issuer_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - imported_ca = call('certificateauthority.create', { - 'certificate': root_ca['certificate'], - 'privatekey': root_ca['privatekey'], - 'name': 'test_imported_ca', - 'create_type': 'CA_CREATE_IMPORTED', - }) - try: - assert imported_ca['issuer'] == 'external', imported_ca - finally: - call('certificateauthority.delete', imported_ca['id']) - - -def test_ca_imported_add_to_trusted_store_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - imported_ca = call('certificateauthority.create', { - 'certificate': root_ca['certificate'], - 'privatekey': root_ca['privatekey'], - 'name': 'test_tinkerbell', - 'add_to_trusted_store': True, - 'create_type': 'CA_CREATE_IMPORTED', - }) - try: - assert imported_ca['add_to_trusted_store'] is True, imported_ca - finally: - call('certificateauthority.delete', imported_ca['id']) - - -def test_creating_cert_from_root_ca(): - with root_certificate_authority('root_ca_test') as root_ca: - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': root_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['cert_type_internal'] is True, cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_cert_chain_of_root_ca_reported_correctly(): - with root_certificate_authority('root_ca_test') as root_ca: - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': root_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['chain_list'] == [cert['certificate'], root_ca['certificate']], cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_creating_cert_from_intermediate_ca(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['cert_type_internal'] is True, cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def 
test_cert_chain_reported_correctly(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['chain_list'] == [ - cert['certificate'], intermediate_ca['certificate'], root_ca['certificate'] - ], cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_cert_issuer_reported_correctly(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - intermediate_ca = call('certificateauthority.get_instance', intermediate_ca['id']) - try: - assert cert['issuer'] == intermediate_ca, cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_creating_csr(): - with certificate_signing_request('csr_test') as csr: - assert csr['cert_type_CSR'] is True, csr - - -def test_issuer_of_csr(): - with certificate_signing_request('csr_test') as csr: - assert csr['issuer'] == 'external - signature pending', csr - - -def test_signing_csr(): - with root_certificate_authority('root_ca') as root_ca: - with certificate_signing_request('csr_test') as csr: - cert = call('certificateauthority.ca_sign_csr', { - 'ca_id': root_ca['id'], - 'csr_cert_id': csr['id'], - 'name': 'signed_cert', - }) - root_ca = call('certificateauthority.get_instance', root_ca['id']) - try: - assert isinstance(cert['signedby'], dict), cert - assert cert['signedby']['id'] == root_ca['id'], cert - assert cert['chain_list'] == [cert['certificate'], root_ca['certificate']] - assert cert['issuer'] == root_ca, cert - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_revoking_cert(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['can_be_revoked'] is True, cert - cert = call('certificate.update', cert['id'], {'revoked': True}, job=True) - assert cert['revoked'] is True, cert - - root_ca = call('certificateauthority.get_instance', root_ca['id']) - intermediate_ca = call('certificateauthority.get_instance', intermediate_ca['id']) - - assert len(root_ca['revoked_certs']) == 1, root_ca - assert len(intermediate_ca['revoked_certs']) == 1, intermediate_ca - - assert root_ca['revoked_certs'][0]['certificate'] == cert['certificate'], root_ca - assert intermediate_ca['revoked_certs'][0]['certificate'] == cert['certificate'], intermediate_ca - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_revoking_ca(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert intermediate_ca['can_be_revoked'] is True, intermediate_ca - intermediate_ca = call('certificateauthority.update', intermediate_ca['id'], {'revoked': True}) - assert intermediate_ca['revoked'] is True, intermediate_ca - - cert = 
call('certificate.get_instance', cert['id']) - assert cert['revoked'] is True, cert - - root_ca = call('certificateauthority.get_instance', root_ca['id']) - assert len(root_ca['revoked_certs']) == 2, root_ca - assert len(intermediate_ca['revoked_certs']) == 2, intermediate_ca - - check_set = {intermediate_ca['certificate'], cert['certificate']} - assert set(c['certificate'] for c in intermediate_ca['revoked_certs']) == check_set, intermediate_ca - assert set(c['certificate'] for c in root_ca['revoked_certs']) == check_set, root_ca - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_created_certs_exist_on_filesystem(): - with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca): - with certificate_signing_request('csr_test') as csr: - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': intermediate_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert get_cert_current_files() == get_cert_expected_files() - finally: - call('certificate.delete', cert['id'], job=True) - - -def test_deleted_certs_dont_exist_on_filesystem(): - with intermediate_certificate_authority('root_ca2', 'intermediate_ca2') as (root_ca2, intermediate_ca2): - # no-op - pass - with certificate_signing_request('csr_test2') as csr2: - pass - assert get_cert_current_files() == get_cert_expected_files() - - -def get_cert_expected_files(): - certs = call('certificate.query') - cas = call('certificateauthority.query') - expected_files = {'/etc/certificates/CA'} - for cert in certs + cas: - if cert['chain_list']: - expected_files.add(cert['certificate_path']) - if cert['privatekey']: - expected_files.add(cert['privatekey_path']) - if cert['cert_type_CSR']: - expected_files.add(cert['csr_path']) - if any(cert[k] for k in ('CA_type_existing', 'CA_type_internal', 'CA_type_intermediate')): - expected_files.add(cert['crl_path']) - return expected_files - - -def get_cert_current_files(): - return { - f['path'] - for p in ('/etc/certificates', '/etc/certificates/CA') for f in call('filesystem.listdir', p) - } - - -@pytest.mark.parametrize('life_time,should_work', [ - (300, True), - (9999999, False), -]) -def test_certificate_lifetime_validation(life_time, should_work): - cert_params = get_cert_params() - cert_params['lifetime'] = life_time - with root_certificate_authority('root-ca') as root_ca: - if should_work: - cert = None - try: - cert = call( - 'certificate.create', { - 'name': 'test-cert', - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - 'signedby': root_ca['id'], - **cert_params, - }, job=True - ) - assert cert['parsed'] is True, cert - finally: - if cert: - call('certificate.delete', cert['id'], job=True) - else: - with pytest.raises(ValidationErrors): - call( - 'certificate.create', { - 'name': 'test-cert', - 'signedby': root_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **cert_params, - }, job=True - ) - - -@pytest.mark.parametrize('certificate,private_key,should_work', [ - ( - textwrap.dedent('''\ - -----BEGIN CERTIFICATE----- - MIIEDTCCAvWgAwIBAgIEAKWUWTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJV - UzEMMAoGA1UECAwDdXNhMRMwEQYDVQQHDApjYWxpZm9ybmlhMQswCQYDVQQKDAJs - bTEWMBQGCSqGSIb3DQEJARYHYUBiLmNvbTAeFw0yMzA0MDYxNjQyMTJaFw0yNDA1 - MDcxNjQyMTJaME4xCzAJBgNVBAYTAlVTMQwwCgYDVQQIDAN1c2ExDDAKBgNVBAcM - A3VzYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYy5jb20wggEiMA0G - CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtvPEA2x3/jp0riSdgb7TqB9uAobzt - 
tYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGcY4G2J+RgQNR8b8lk - m38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7q2LmE6wVIVjWqTSi - Yo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGeTLEW5wiSYO8QMD0r - JHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7dkLu2tawHE8DD97O5 - zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHBwFtqQaBlAgMBAAGj - geswgegwDgYDVR0RBAcwBYIDYWJjMB0GA1UdDgQWBBSRzlS66ts6rhuCN+4VK2x7 - 8E+n1zAMBgNVHRMBAf8EAjAAMIGABgNVHSMEeTB3gBR1fZ31S5XHrijsT/C9fzbB - aqrg5qFZpFcwVTELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA3VzYTETMBEGA1UEBwwK - Y2FsaWZvcm5pYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYi5jb22C - BACllFgwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwDgYDVR0PAQH/BAQDAgOIMA0G - CSqGSIb3DQEBCwUAA4IBAQA7UwYNr6gspgRcCGwzl5RUAL/N3NXv3rcgTPF405s5 - OXKDPAxWSulzt/jqAesYvI27koOsGj0sDsSRLRdmj4HG91Xantnv5rxGqdYHEDPo - j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng - 2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P - uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br - 4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM - -----END CERTIFICATE----- - '''), - textwrap.dedent('''\ - -----BEGIN PRIVATE KEY----- - MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCtvPEA2x3/jp0r - iSdgb7TqB9uAobzttYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGc - Y4G2J+RgQNR8b8lkm38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7 - q2LmE6wVIVjWqTSiYo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGe - TLEW5wiSYO8QMD0rJHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7d - kLu2tawHE8DD97O5zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHB - wFtqQaBlAgMBAAECggEAFNc827rtIspDPUUzFYTg4U/2+zurk6I6Xg+pMmjnXiUV - HZchFz2lngYfHkD+krnZNSBuvGR1CHhOdOmU1jp70TYFpzWrpWdnvs5qcsWZ/1Tt - Vi4tcLsTkloC2+QGPFTiFtD3EuXGxhuTecvJzcqfUluRMhLTDwWegFvBvIVdSVeZ - 9XFDZF9O748tdt2PhYcL2L/xDz4sIz89ek4P1v4raB52rcleIduqMat29crVR3ex - VsZK3PLW6HCquUQvdvjLblfzjDS1pqcpIiSsYCrP0eEEKrrg44V8VjcPxXIg4GAE - ioDOpi9vO/3xyxYxXBtlD2o6c9kZUrp+xxx9jztdIQKBgQDo8witC33Z7Rd6dLm9 - zgN/wZ2lWqE927fXZBExKjCXZ+A3N58One0TR2qI9S+BRVc2KOCWFGUjnHbx1PfE - xU1UNDY+ir9Lqk+rzhyEk4vst/IwhyovmAhL5fONqlfxB+l29cUh6JIYMtqaWYvj - AbmS5YhZRMa3kI/BtCTRJtPecQKBgQC+7f57XWt7HNe7FvrDTz5M8AmQ7y487NxZ - OcZ1+YKJ57PVY7G7Ye3xqRTd05L6h1P1eCO0gLDiSy5VOz47uFdNcD/9Ia+Ng2oq - P8TC36b86dz3ZDhBm4AB3shaD/JBjUQ0NwLosmrMaDF+lVu8NPA60eeQ70/RgbSA - KNrOUH1DNQKBgQDicOzsGZGat6fs925enNY16CWwSOsYUG7ix3kWy6Y0Z1tDEaRh - 9w4vgWqD+6LUDG18TjwSZ3zxIvVUmurGsew7gA2Cuii+Cq4rmc2K6kpIL38TwTA2 - 15io/rzD5uRZfpFpe/rGvWbWcwigpY8fedvEea8S55IrejDj4JMxZIbrYQKBgQCG - Ke68+XRhWm8thIRJYhHBNptCQRAYt8hO2o5esCnOhgaUWC24IqR1P/7tsZKCgT26 - K+XLHPMu0O2J7stYY7zVKZ+NXHJj2ohrj8vPtCE/b4ZaQQ5W69ITfl0DDFmLPp1C - o7Vjlpv9bun4rTN9GSYF7yHtcnyAF8iilhLLDzw2UQKBgQC4FzI6/P2HcUNzf+/m - AThk8+4V35gOSxn3uk48CXNStcCoLMEeXM69SGYq8GaGU/piaog9D8RvF4yMAnnL - wNpy8J/4ldluyidX61N0dMS+NL4l4TPjTvOY22KzjwfnBoqzg+93Mt//M4HfR/ka - 3EWl5VmzbuEeytrcH3uHAUpkKg== - -----END PRIVATE KEY----- - '''), - True, - ), - ( - textwrap.dedent('''\ - -----BEGIN CERTIFICATE----- - MIIEDTCCAvWgAwIBAgIEAKWUWTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJV - UzEMMAoGA1UECAwDdXNhMRMwEQYDVQQHDApjYWxpZm9ybmlhMQswCQYDVQQKDAJs - bTEWMBQGCSqGSIb3DQEJARYHYUBiLmNvbTAeFw0yMzA0MDYxNjQyMTJaFw0yNDA1 - MDcxNjQyMTJaME4xCzAJBgNVBAYTAlVTMQwwCgYDVQQIDAN1c2ExDDAKBgNVBAcM - A3VzYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYy5jb20wggEiMA0G - CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtvPEA2x3/jp0riSdgb7TqB9uAobzt - tYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGcY4G2J+RgQNR8b8lk - m38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7q2LmE6wVIVjWqTSi - Yo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGeTLEW5wiSYO8QMD0r 
- JHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7dkLu2tawHE8DD97O5 - zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHBwFtqQaBlAgMBAAGj - geswgegwDgYDVR0RBAcwBYIDYWJjMB0GA1UdDgQWBBSRzlS66ts6rhuCN+4VK2x7 - 8E+n1zAMBgNVHRMBAf8EAjAAMIGABgNVHSMEeTB3gBR1fZ31S5XHrijsT/C9fzbB - aqrg5qFZpFcwVTELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA3VzYTETMBEGA1UEBwwK - Y2FsaWZvcm5pYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYi5jb22C - BACllFgwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwDgYDVR0PAQH/BAQDAgOIMA0G - CSqGSIb3DQEBCwUAA4IBAQA7UwYNr6gspgRcCGwzl5RUAL/N3NXv3rcgTPF405s5 - OXKDPAxWSulzt/jqAesYvI27koOsGj0sDsSRLRdmj4HG91Xantnv5rxGqdYHEDPo - j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng - 2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P - uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br - 4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM - -----END CERTIFICATE----- - '''), - textwrap.dedent('''\ - -----BEGIN PRIVATE KEY----- - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDVMPccUqq6jd8h - h0ybrwRkvK+pvOJze00IK7F6A8RRyCwDL2Yc0GpWR5ecY+jBiZ1n+TfKfaybdKR0 - 0hhFFuU74JTsUk298hI1GVBNvwbimgraQciWjg0wDjHAN7AFZL8Jb/Tn7/DZlmn+ - TgqdPaFIeD4XnLX6zwrc4VemKYDDcdr5JyDVCt3ZtqTEbbtxQ4WvZbtCxlzlkyJu - xwdmGyCvjkQri55+FaejvnPCUzJSOK28jShBuZCIS3lR7HCcAS4cc05TTrWSZr+i - brLISVEz1XASc0pKz8QGMuz5Hk5uNRLl4JGmWZrSV9lqtFYP9hatpLi5mnhWpgYi - Q0IXvNUXAgMBAAECggEAdbgf+0e6dmC4gO8Q4jZ2GpoF9ZgTAulm08gsq89ArFf3 - 1ZpqrCZ5UUMe+IBCmfu/KxZ2NB3JHd3+oXMRa7UEx1dvZD7eJrBwVVmw+f0tdBrT - O0lv1ZKCvbJYzmbxj0jeI/vqI9heCggAZyf4vHK3iCi9QJSL9/4zZVwY5eus6j4G - RCMXW8ZqiKX3GLtCjPmZilYQHNDbsfAbqy75AsG81fgaKkYkJS29rte9R34BajZs - OFm+y6nIe6zsf0vhn/yPVN4Yhuu/WhkvqouR2NhSF7ulXckuR/ef55GPpbRcpSOj - VUkwJL3wsHPozvmcks/TnZbqj0u7XBGjZ2VK8sF+gQKBgQDsJGMeeaua5pOITVHk - reHaxy4tLs1+98++L9SffBbsQcCu4OdgMBizCXuUw9bHlMx19B/B56cJst239li3 - dHfC/mF4/8em5XOx97FyC0rF02qYCPXViTrTSovSEWHuM/ChmhaRlZdp5F4EBMp7 - ELdf4OBCHGz47UCLQF75/FPtJwKBgQDnHn9HuFepY+yV1sNcPKj1GfciaseKzTk1 - Iw5VVtqyS2p8vdXNUiJmaF0245S3phRBL6PDhdfd3SwMmNYvhTYsqBc6ZRHO4b9J - SjmHct63286NuEn0piYaa3MZ8sV/xI0a5leAdkzyqPTCcn0HlvDL0HTV34umdmfj - kqC4jsWukQKBgC48cavl5tPNkdV+TiqYYUCU/1WZdGMH4oU6mEch5NsdhLy5DJSo - 1i04DhpyvfsWB3KQ+ibdVLdxbjg24+gHxetII42th0oGY0DVXskVrO5PFu/t0TSe - SgZU8kuPW71oLhV2NjULNTpmnIHs7jhqbX04arCHIE8dJSYe1HneDhDBAoGBALTk - 4txgxYQYaNFykd/8voVwuETg7KOQM0mK0aor2+qXKpbOAqy8r54V63eNsxX20H2g - 6v2bIbVOai7F5Ua2bguP2PZkqwaRHKYhiVuhpf6j9UxpRMFO1h3xodpacQiq74Jx - bWVnspxvb3tOHtw04O21j+ziFizJGlE9r7wkS0dxAoGAeq/Ecb+nJp/Ce4h5US1O - 4rruiLLYMkcFGmhSMcQ+lVbGOn4eSpqrGWn888Db2oiu7mv+u0TK9ViXwHkfp4FP - Hnm0S8e25py1Lj+bk1tH0ku1I8qcAtihYBtSwPGj+66Qyr8KOlxZP2Scvcqu+zBc - cyhsrrlRc3Gky9L5gtdxdeo= - -----END PRIVATE KEY----- - '''), - False, - ), - ( - textwrap.dedent('''\ - -----BEGIN CERTIFICATE----- - ntnv5rxGqdYHEDPo - j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng - 2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P - uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br - 4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM - -----END CERTIFICATE----- - '''), - textwrap.dedent('''\ - -----BEGIN PRIVATE KEY----- - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDVMPccUqq6jd8h - h0ybrwRkvK+pvOJze00IK7F6A8RRyCwDL2Yc0GpWR5ecY+jBiZ1n+TfKfaybdKR0 - 0hhFFuU74JTsUk298hI1GVBNvwbimgraQciWjg0wDjHAN7AFZL8Jb/Tn7/DZlmn+ - TgqdPaFIeD4XnLX6zwrc4VemKYDDcdr5JyDVCt3ZtqTEbbtxQ4WvZbtCxlzlkyJu - xwdmGyCvjkQri55+FaejvnPCUzJSOK28jShBuZCIS3lR7HCcAS4cc05TTrWSZr+i - brLISVEz1XASc0pKz8QGMuz5Hk5uNRLl4JGmWZrSV9lqtFYP9hatpLi5mnhWpgYi - 
Q0IXvNUXAgMBAAECggEAdbgf+0e6dmC4gO8Q4jZ2GpoF9ZgTAulm08gsq89ArFf3
-            1ZpqrCZ5UUMe+IBCmfu/KxZ2NB3JHd3+oXMRa7UEx1dvZD7eJrBwVVmw+f0tdBrT
-            O0lv1ZKCvbJYzmbxj0jeI/vqI9heCggAZyf4vHK3iCi9QJSL9/4zZVwY5eus6j4G
-            RCMXW8ZqiKX3GLtCjPmZilYQHNDbsfAbqy75AsG81fgaKkYkJS29rte9R34BajZs
-            OFm+y6nIe6zsf0vhn/yPVN4Yhuu/WhkvqouR2NhSF7ulXckuR/ef55GPpbRcpSOj
-            VUkwJL3wsHPozvmcks/TnZbqj0u7XBGjZ2VK8sF+gQKBgQDsJGMeeaua5pOITVHk
-            reHaxy4tLs1+98++L9SffBbsQcCu4OdgMBizCXuUw9bHlMx19B/B56cJst239li3
-            dHfC/mF4/8em5XOx97FyC0rF02qYCPXViTrTSovSEWHuM/ChmhaRlZdp5F4EBMp7
-            ELdf4OBCHGz47UCLQF75/FPtJwKBgQDnHn9HuFepY+yV1sNcPKj1GfciaseKzTk1
-            Iw5VVtqyS2p8vdXNUiJmaF0245S3phRBL6PDhdfd3SwMmNYvhTYsqBc6ZRHO4b9J
-            SjmHct63286NuEn0piYaa3MZ8sV/xI0a5leAdkzyqPTCcn0HlvDL0HTV34umdmfj
-            kqC4jsWukQKBgC48cavl5tPNkdV+TiqYYUCU/1WZdGMH4oU6mEch5NsdhLy5DJSo
-            1i04DhpyvfsWB3KQ+ibdVLdxbjg24+gHxetII42th0oGY0DVXskVrO5PFu/t0TSe
-            SgZU8kuPW71oLhV2NjULNTpmnIHs7jhqbX04arCHIE8dJSYe1HneDhDBAoGBALTk
-            4txgxYQYaNFykd/8voVwuETg7KOQM0mK0aor2+qXKpbOAqy8r54V63eNsxX20H2g
-            6v2bIbVOai7F5Ua2bguP2PZkqwaRHKYhiVuhpf6j9UxpRMFO1h3xodpacQiq74Jx
-            bWVnspxvb3tOHtw04O21j+ziFizJGlE9r7wkS0dxAoGAeq/Ecb+nJp/Ce4h5US1O
-            4rruiLLYMkcFGmhSMcQ+lVbGOn4eSpqrGWn888Db2oiu7mv+u0TK9ViXwHkfp4FP
-            Hnm0S8e25py1Lj+bk1tH0ku1I8qcAtihYBtSwPGj+66Qyr8KOlxZP2Scvcqu+zBc
-            cyhsrrlRc3Gky9L5gtdxdeo=
-            -----END PRIVATE KEY-----
-        '''),
-        False,
-    )
-], ids=['valid_cert', 'invalid_cert_key_mismatch', 'invalid_cert_malformed'])
-def test_importing_certificate_validation(certificate, private_key, should_work):
-    cert_params = {'certificate': certificate, 'privatekey': private_key}
-    if should_work:
-        cert = None
-        try:
-            cert = call(
-                'certificate.create', {
-                    'name': 'test-cert',
-                    'create_type': 'CERTIFICATE_CREATE_IMPORTED',
-                    **cert_params,
-                }, job=True
-            )
-            assert cert['parsed'] is True, cert
-        finally:
-            if cert:
-                call('certificate.delete', cert['id'], job=True)
-
-    else:
-        with pytest.raises(ValidationErrors):
-            call(
-                'certificate.create', {
-                    'name': 'test-cert',
-                    'create_type': 'CERTIFICATE_CREATE_IMPORTED',
-                    **cert_params,
-                }, job=True
-            )
diff --git a/tests/api2/test_client_job.py b/tests/api2/test_client_job.py
deleted file mode 100644
index 04ad2df0b1525..0000000000000
--- a/tests/api2/test_client_job.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pprint
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import client, mock
-
-
-# FIXME: Sometimes an identical message for the `SUCCESS` state is sent (or received) twice; we were unable
-# to determine why, and since it does not break anything, we are not investigating further.
-# Also, the `RUNNING` message is sometimes not received; this likewise has no logical explanation and is not
-# reproducible.
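The FIXME above describes delivery quirks rather than a product bug: the terminal `SUCCESS` message may arrive twice, and the intermediate `RUNNING` message may be dropped, so the test below relies on reruns. A minimal sketch of a more tolerant callback pattern follows; it is not part of the deleted file, and it assumes only the same `client`/`mock` helpers and the `c.call(..., job=True, callback=...)` signature that the test below already uses. Collapsing consecutive duplicate states and asserting only on the terminal state makes neither quirk able to fail the check:

import time

from middlewared.test.integration.utils import client, mock


def collect_job_states():
    # Hypothetical variant of the test below, tolerant of duplicated or
    # dropped job event messages.
    with mock("test.test1", """
        from middlewared.service import job

        @job()
        def mock(self, job, *args):
            return 42
    """):
        with client() as c:
            states = []

            def cb(job):
                # Collapse consecutive duplicates (e.g. SUCCESS delivered twice).
                if not states or states[-1] != job["state"]:
                    states.append(job["state"])

            result = c.call("test.test1", job=True, callback=cb)

            time.sleep(2)  # the callback runs in a separate thread; let it settle
            # RUNNING may legitimately be absent, so assert only the terminal state.
            assert states[-1] == "SUCCESS", states
            assert result == 42
            return states

Asserting on the de-duplicated terminal state instead of an exact message count is one way to avoid the rerun decorator entirely, at the cost of no longer detecting a genuinely missing RUNNING event.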
-@pytest.mark.flaky(reruns=5, reruns_delay=5) -def test_client_job_callback(): - with mock("test.test1", """ - from middlewared.service import job - - @job() - def mock(self, job, *args): - import time - time.sleep(2) - return 42 - """): - with client() as c: - results = [] - - c.call("test.test1", job=True, callback=lambda job: results.append(job.copy())) - - # callback is called in a separate thread, allow it to settle - time.sleep(2) - - assert len(results) == 2, pprint.pformat(results, indent=2) - assert results[0]['state'] == 'RUNNING' - assert results[1]['state'] == 'SUCCESS' - assert results[1]['result'] == 42 diff --git a/tests/api2/test_cloud_backup.py b/tests/api2/test_cloud_backup.py deleted file mode 100644 index b35e159562bd0..0000000000000 --- a/tests/api2/test_cloud_backup.py +++ /dev/null @@ -1,271 +0,0 @@ -import os -import types - -import boto3 -import pytest - -from truenas_api_client import ClientException -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.cloud_backup import task, run_task -from middlewared.test.integration.assets.cloud_sync import credential -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils.call import call -from middlewared.test.integration.utils.mock import mock -from middlewared.test.integration.utils.ssh import ssh - -try: - from config import ( - AWS_ACCESS_KEY_ID, - AWS_SECRET_ACCESS_KEY, - AWS_BUCKET, - ) -except ImportError: - pytestmark = pytest.mark.skip(reason="AWS credential are missing in config.py") - - -def clean(): - s3 = boto3.Session( - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY, - ).resource("s3") - bucket = s3.Bucket(AWS_BUCKET) - bucket.objects.filter(Prefix="cloud_backup/").delete() - - -@pytest.fixture(scope="module") -def s3_credential(): - with credential({ - "provider": "S3", - "attributes": { - "access_key_id": AWS_ACCESS_KEY_ID, - "secret_access_key": AWS_SECRET_ACCESS_KEY, - }, - }) as c: - yield c - - -@pytest.fixture(scope="function") -def cloud_backup_task(s3_credential): - clean() - - with dataset("cloud_backup") as local_dataset: - with task({ - "path": f"/mnt/{local_dataset}", - "credentials": s3_credential["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "cloud_backup", - }, - "password": "test", - "keep_last": 100, - }) as t: - yield types.SimpleNamespace( - local_dataset=local_dataset, - task=t, - ) - - -def test_cloud_backup(cloud_backup_task): - assert call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) == [] - - ssh(f"dd if=/dev/urandom of=/mnt/{cloud_backup_task.local_dataset}/blob1 bs=1M count=1") - run_task(cloud_backup_task.task) - - logs = ssh("cat " + call("cloud_backup.get_instance", cloud_backup_task.task["id"])["job"]["logs_path"]) - assert "Files: 1 new, 0 changed, 0 unmodified" in logs - - snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) - assert len(snapshots) == 1 - assert (snapshots[0]["time"] - call("system.info")["datetime"]).total_seconds() < 300 - assert snapshots[0]["paths"] == [f"/mnt/{cloud_backup_task.local_dataset}"] - first_snapshot = snapshots[0] - - ssh(f"mkdir /mnt/{cloud_backup_task.local_dataset}/dir1") - ssh(f"dd if=/dev/urandom of=/mnt/{cloud_backup_task.local_dataset}/dir1/blob2 bs=1M count=1") - - run_task(cloud_backup_task.task) - - logs = ssh("cat " + call("cloud_backup.get_instance", cloud_backup_task.task["id"])["job"]["logs_path"]) - assert "Files: 1 new, 0 changed, 1 
unmodified" in logs - - snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) - assert len(snapshots) == 2 - - contents = call( - "cloud_backup.list_snapshot_directory", - cloud_backup_task.task["id"], - snapshots[-1]["id"], - f"/mnt/{cloud_backup_task.local_dataset}", - ) - assert len(contents) == 3 - assert contents[0]["name"] == "cloud_backup" - assert contents[1]["name"] == "blob1" - assert contents[2]["name"] == "dir1" - - call("cloud_backup.update", cloud_backup_task.task["id"], {"keep_last": 2}) - - run_task(cloud_backup_task.task) - - snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) - assert all(snapshot["id"] != first_snapshot["id"] for snapshot in snapshots) - - snapshot_to_delete = snapshots[0] - call("cloud_backup.delete_snapshot", cloud_backup_task.task["id"], snapshot_to_delete["id"], job=True) - - snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) - assert all(snapshot["id"] != snapshot_to_delete["id"] for snapshot in snapshots) - - -@pytest.fixture(scope="module") -def completed_cloud_backup_task(s3_credential): - clean() - - with dataset("completed_cloud_backup") as local_dataset: - ssh(f"mkdir /mnt/{local_dataset}/dir1") - ssh(f"touch /mnt/{local_dataset}/dir1/file1") - ssh(f"mkdir /mnt/{local_dataset}/dir2") - ssh(f"touch /mnt/{local_dataset}/dir2/file2") - ssh(f"mkdir /mnt/{local_dataset}/dir3") - ssh(f"touch /mnt/{local_dataset}/dir3/file3") - - with task({ - "path": f"/mnt/{local_dataset}", - "credentials": s3_credential["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "cloud_backup", - }, - "password": "test", - "keep_last": 100, - }) as t: - run_task(t) - - snapshot = call("cloud_backup.list_snapshots", t["id"])[0] - - yield types.SimpleNamespace( - local_dataset=local_dataset, - task=t, - snapshot=snapshot, - ) - - -@pytest.mark.parametrize("options,result", [ - ({}, ["dir1/file1", "dir2/file2", "dir3/file3"]), - ({"include": ["dir1", "dir2"]}, ["dir1/file1", "dir2/file2"]), - ({"exclude": ["dir2", "dir3"]}, ["dir1/file1"]), -]) -def test_cloud_backup_restore(completed_cloud_backup_task, options, result): - with dataset("restore") as restore: - call( - "cloud_backup.restore", - completed_cloud_backup_task.task["id"], - completed_cloud_backup_task.snapshot["id"], - f"/mnt/{completed_cloud_backup_task.local_dataset}", - f"/mnt/{restore}", - options, - job=True, - ) - - assert sorted([ - os.path.relpath(path, f"/mnt/{restore}") - for path in ssh(f"find /mnt/{restore} -type f").splitlines() - ]) == result - - -@pytest.fixture(scope="module") -def zvol(): - with dataset("cloud_backup_zvol", {"type": "VOLUME", "volsize": 1024 * 1024}) as zvol: - path = f"/dev/zvol/{zvol}" - ssh(f"dd if=/dev/urandom of={path} bs=1M count=1") - - yield path - - -def test_zvol_cloud_backup(s3_credential, zvol): - clean() - - with mock("cloud_backup.validate_zvol", return_value=None): - with task({ - "path": zvol, - "credentials": s3_credential["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "cloud_backup", - }, - "password": "test", - "keep_last": 100, - }) as t: - run_task(t) - - -def test_zvol_cloud_backup_create_time_validation(s3_credential, zvol): - clean() - - with pytest.raises(ValidationErrors) as ve: - with task({ - "path": zvol, - "credentials": s3_credential["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "cloud_backup", - }, - "password": "test", - "keep_last": 100, - }): - pass - - assert "cloud_backup_create.path" in ve.value - - -def 
test_zvol_cloud_backup_runtime_validation(s3_credential, zvol): - clean() - - m = mock("cloud_backup.validate_zvol", return_value=None) - m.__enter__() - exited = False - try: - with task({ - "path": zvol, - "credentials": s3_credential["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "cloud_backup", - }, - "password": "test", - "keep_last": 100, - }) as t: - m.__exit__(None, None, None) - exited = True - - with pytest.raises(ClientException): - run_task(t) - finally: - if not exited: - m.__exit__(None, None, None) - - -def test_create_to_backend_with_a_different_password(cloud_backup_task): - with pytest.raises(ValidationErrors) as ve: - with task({ - "path": cloud_backup_task.task["path"], - "credentials": cloud_backup_task.task["credentials"]["id"], - "attributes": cloud_backup_task.task["attributes"], - "password": "test2", - "keep_last": 100, - }): - pass - - assert "cloud_backup_create.password" in ve.value - - -def test_update_with_incorrect_password(cloud_backup_task): - with pytest.raises(ValidationErrors) as ve: - call("cloud_backup.update", cloud_backup_task.task["id"], {"password": "test2"}) - - assert "cloud_backup_update.password" in ve.value - - -def test_sync_initializes_repo(cloud_backup_task): - clean() - - call("cloud_backup.sync", cloud_backup_task.task["id"], job=True) diff --git a/tests/api2/test_cloud_sync.py b/tests/api2/test_cloud_sync.py deleted file mode 100644 index fcfae11a9f051..0000000000000 --- a/tests/api2/test_cloud_sync.py +++ /dev/null @@ -1,239 +0,0 @@ -import re -import time - -import pytest -from middlewared.test.integration.assets.cloud_sync import ( - credential, task, local_ftp_credential, local_ftp_task, run_task, -) -from middlewared.test.integration.assets.ftp import anonymous_ftp_server, ftp_server_with_user_account -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, pool, ssh -from middlewared.test.integration.utils.client import truenas_server - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) -from auto_config import ha - - -def test_include(): - with local_ftp_task({ - "include": ["/office/**", "/work/**"], - }) as task: - ssh(f'mkdir {task["path"]}/office') - ssh(f'touch {task["path"]}/office/paper') - ssh(f'mkdir {task["path"]}/work') - ssh(f'touch {task["path"]}/work/code') - ssh(f'mkdir {task["path"]}/games') - ssh(f'touch {task["path"]}/games/minecraft') - ssh(f'touch {task["path"]}/fun') - - run_task(task) - - assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'office\nwork\n' - - -def test_exclude_recycle_bin(): - with local_ftp_task({ - "exclude": ["$RECYCLE.BIN/"], - }) as task: - ssh(f'mkdir {task["path"]}/\'$RECYCLE.BIN\'') - ssh(f'touch {task["path"]}/\'$RECYCLE.BIN\'/garbage') - ssh(f'touch {task["path"]}/file') - - run_task(task) - - assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'file\n' - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -@pytest.mark.parametrize("anonymous", [True, False]) -@pytest.mark.parametrize("defaultroot", [True, False]) -@pytest.mark.parametrize("has_leading_slash", [True, False]) -def test_ftp_subfolder(anonymous, defaultroot, has_leading_slash): - with dataset("cloudsync_local") as local_dataset: - config = {"defaultroot": defaultroot} - with (anonymous_ftp_server if anonymous else ftp_server_with_user_account)(config) as ftp: - remote_dataset = ftp.dataset - ssh(f"touch /mnt/{remote_dataset}/bad-file") - ssh(f"mkdir /mnt/{remote_dataset}/data") - ssh(f"touch 
/mnt/{remote_dataset}/data/another-bad-file") - ssh(f"mkdir /mnt/{remote_dataset}/data/child") - ssh(f"touch /mnt/{remote_dataset}/data/child/good-file") - - with credential({ - "name": "Test", - "provider": "FTP", - "attributes": { - "host": "localhost", - "port": 21, - "user": ftp.username, - "pass": ftp.password, - }, - }) as c: - folder = f"{'/' if has_leading_slash else ''}data/child" - if not anonymous and not defaultroot: - # We have access to the FTP server root directory - if has_leading_slash: - # A path with a leading slash should be complete path in this case - folder = f"/mnt/{ftp.dataset}/data/child" - - with task({ - "direction": "PULL", - "transfer_mode": "MOVE", - "path": f"/mnt/{local_dataset}", - "credentials": c["id"], - "attributes": { - "folder": folder, - }, - }) as t: - run_task(t) - - assert ssh(f'ls /mnt/{local_dataset}') == 'good-file\n' - - -@pytest.mark.parametrize("has_zvol_sibling", [True, False]) -def test_snapshot(has_zvol_sibling): - with dataset("test_cloudsync_snapshot") as ds: - ssh(f"mkdir -p /mnt/{ds}/dir1/dir2") - ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1") - - if has_zvol_sibling: - ssh(f"zfs create -V 1gb {pool}/zvol") - - try: - with local_ftp_task({ - "path": f"/mnt/{ds}/dir1/dir2", - "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}], # So it'll take 5 seconds - "snapshot": True, - }) as task: - job_id = call("cloudsync.sync", task["id"]) - - time.sleep(2.5) - - ps_ax = ssh("ps ax | grep rclone") - - call("core.job_wait", job_id, job=True) - - assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax) - - time.sleep(1) - - assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == [] - finally: - if has_zvol_sibling: - ssh(f"zfs destroy -r {pool}/zvol") - - -def test_sync_onetime(): - with dataset("cloudsync_local") as local_dataset: - with local_ftp_credential() as c: - call("cloudsync.sync_onetime", { - "direction": "PUSH", - "transfer_mode": "COPY", - "path": f"/mnt/{local_dataset}", - "credentials": c["id"], - "attributes": { - "folder": "", - }, - }, job=True) - - -def test_abort(): - with dataset("test_cloudsync_abort") as ds: - ssh(f"dd if=/dev/urandom of=/mnt/{ds}/blob bs=1M count=1") - - with local_ftp_task({ - "path": f"/mnt/{ds}", - "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 100}], # So it'll take 10 seconds - }) as task: - job_id = call("cloudsync.sync", task["id"]) - - time.sleep(2.5) - - call("core.job_abort", job_id) - - for i in range(10): - time.sleep(1) - state = call("cloudsync.query", [["id", "=", task["id"]]], {"get": True})["job"]["state"] - if state == "RUNNING": - continue - elif state == "ABORTED": - break - else: - assert False, f"Cloud sync task is {state}" - else: - assert False, "Cloud sync task was not aborted" - - assert "rclone" not in ssh("ps ax") - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -@pytest.mark.parametrize("create_empty_src_dirs", [True, False]) -def test_create_empty_src_dirs(create_empty_src_dirs): - with dataset("cloudsync_local") as local_dataset: - ssh(f"mkdir /mnt/{local_dataset}/empty-dir") - ssh(f"mkdir /mnt/{local_dataset}/non-empty-dir") - ssh(f"touch /mnt/{local_dataset}/non-empty-dir/file") - - with anonymous_ftp_server() as ftp: - with credential({ - "name": "Test", - "provider": "FTP", - "attributes": { - "host": "localhost", - "port": 21, - "user": ftp.username, - "pass": ftp.password, - }, - }) as c: - with task({ - "direction": "PUSH", - "transfer_mode": "SYNC", - "path": f"/mnt/{local_dataset}", - 
"credentials": c["id"], - "attributes": { - "folder": "", - }, - "create_empty_src_dirs": create_empty_src_dirs, - }) as t: - run_task(t) - - if create_empty_src_dirs: - assert ssh(f'ls /mnt/{ftp.dataset}') == 'empty-dir\nnon-empty-dir\n' - else: - assert ssh(f'ls /mnt/{ftp.dataset}') == 'non-empty-dir\n' - - -def test_state_persist(): - with dataset("test_cloudsync_state_persist") as ds: - with local_ftp_task({ - "path": f"/mnt/{ds}", - }) as task: - call("cloudsync.sync", task["id"], job=True) - - row = call("datastore.query", "tasks.cloudsync", [["id", "=", task["id"]]], {"get": True}) - assert row["job"]["state"] == "SUCCESS" - - -if ha: - def test_state_failover(): - assert call("failover.status") == "MASTER" - - ha_ips = truenas_server.ha_ips() - - with dataset("test_cloudsync_state_failover") as ds: - with local_ftp_task({"path": f"/mnt/{ds}"}) as task: - call("cloudsync.sync", task["id"], job=True) - time.sleep(5) # Job sending is not synchronous, allow it to propagate - - file1_path = call("cloudsync.get_instance", task["id"])["job"]["logs_path"] - file1_contents = ssh(f'cat {file1_path}', ip=ha_ips['active']) - assert file1_contents - - file2_path = call("failover.call_remote", "cloudsync.get_instance", [task["id"]])["job"]["logs_path"] - file2_contents = ssh(f'cat {file2_path}', ip=ha_ips['standby']) - assert file2_contents - - assert file1_contents == file2_contents diff --git a/tests/api2/test_cloud_sync_config.py b/tests/api2/test_cloud_sync_config.py deleted file mode 100644 index 5977bbf6499a7..0000000000000 --- a/tests/api2/test_cloud_sync_config.py +++ /dev/null @@ -1,34 +0,0 @@ -import time - -from middlewared.test.integration.assets.cloud_sync import credential, task -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.mock_rclone import mock_rclone - - -def test_rclone_config_writer_bool(): - with dataset("test_cloud_sync_config") as ds: - with credential({ - "name": "Google Cloud Storage", - "provider": "GOOGLE_CLOUD_STORAGE", - "attributes": { - "service_account_credentials": "{\"project_id\": 1}", - }, - }) as c: - with task({ - "direction": "PUSH", - "transfer_mode": "COPY", - "path": f"/mnt/{ds}", - "credentials": c["id"], - "attributes": { - "bucket": "bucket", - "folder": "", - "bucket_policy_only": True, - }, - }) as t: - with mock_rclone() as mr: - call("cloudsync.sync", t["id"]) - - time.sleep(2.5) - - assert mr.result["config"]["remote"]["bucket_policy_only"] == "true" diff --git a/tests/api2/test_cloud_sync_credentials.py b/tests/api2/test_cloud_sync_credentials.py deleted file mode 100644 index 3123e56c81678..0000000000000 --- a/tests/api2/test_cloud_sync_credentials.py +++ /dev/null @@ -1,13 +0,0 @@ -from middlewared.test.integration.assets.cloud_sync import local_ftp_credential_data -from middlewared.test.integration.utils import call - - -def test_verify_cloud_credential(): - with local_ftp_credential_data() as data: - assert call("cloudsync.credentials.verify", data)["valid"] - - -def test_verify_cloud_credential_fail(): - with local_ftp_credential_data() as data: - data["attributes"]["user"] = "root" - assert not call("cloudsync.credentials.verify", data)["valid"] diff --git a/tests/api2/test_cloud_sync_crud.py b/tests/api2/test_cloud_sync_crud.py deleted file mode 100644 index d21184fc5b12b..0000000000000 --- a/tests/api2/test_cloud_sync_crud.py +++ /dev/null @@ -1,88 +0,0 @@ -import pytest - -from middlewared.service_exception import CallError 
-from middlewared.test.integration.assets.cloud_sync import credential as _credential, task as _task -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh - -try: - from config import ( - AWS_ACCESS_KEY_ID, - AWS_SECRET_ACCESS_KEY, - AWS_BUCKET - ) -except ImportError: - Reason = 'AWS credential are missing in config.py' - pytestmark = pytest.mark.skip(reason=Reason) - - -@pytest.fixture(scope='module') -def credentials(): - with _credential({ - "provider": "S3", - "attributes": { - "access_key_id": AWS_ACCESS_KEY_ID, - "secret_access_key": AWS_SECRET_ACCESS_KEY, - } - }) as c: - yield c - - -@pytest.fixture(scope='module') -def task(credentials): - with dataset("cloudsync_local") as local_dataset: - with _task({ - "direction": "PUSH", - "transfer_mode": "COPY", - "path": f"/mnt/{local_dataset}", - "credentials": credentials["id"], - "attributes": { - "bucket": AWS_BUCKET, - "folder": "", - }, - }) as t: - yield t - - -def test_update_cloud_credentials(credentials): - call("cloudsync.credentials.update", credentials["id"], { - "attributes": { - "access_key_id": "garbage", - "secret_access_key": AWS_SECRET_ACCESS_KEY, - } - }) - - assert call("cloudsync.credentials.get_instance", credentials["id"])["attributes"]["access_key_id"] == "garbage" - - call("cloudsync.credentials.update", credentials["id"], { - "attributes": { - "access_key_id": AWS_ACCESS_KEY_ID, - "secret_access_key": AWS_SECRET_ACCESS_KEY, - }, - }) - - -def test_update_cloud_sync(task): - assert call("cloudsync.update", task["id"], {"direction": "PULL"}) - - -def test_run_cloud_sync(task): - call("cloudsync.sync", task["id"], job=True) - print(ssh(f"ls {task['path']}")) - assert ssh(f"cat {task['path']}/freenas-test.txt") == "freenas-test\n" - - -def test_restore_cloud_sync(task): - restore_task = call("cloudsync.restore", task["id"], { - "transfer_mode": "COPY", - "path": task["path"], - }) - - call("cloudsync.delete", restore_task["id"]) - - -def test_delete_cloud_credentials_error(credentials, task): - with pytest.raises(CallError) as ve: - call("cloudsync.credentials.delete", credentials["id"]) - - assert "This credential is used by cloud sync task" in ve.value.errmsg diff --git a/tests/api2/test_cloud_sync_custom_s3.py b/tests/api2/test_cloud_sync_custom_s3.py deleted file mode 100644 index 25d097d1fc96b..0000000000000 --- a/tests/api2/test_cloud_sync_custom_s3.py +++ /dev/null @@ -1,49 +0,0 @@ -import time - -import pytest - -from middlewared.test.integration.assets.cloud_sync import credential, task -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.mock_rclone import mock_rclone - - -@pytest.mark.parametrize("credential_attributes,result", [ - ( - { - - "endpoint": "s3.fr-par.scw.cloud", - "region": "fr-par", - "skip_region": False, - "signatures_v2": False, - }, - {"region": "fr-par"}, - ) -]) -def test_custom_s3(credential_attributes, result): - with dataset("test_cloudsync_custom_s3") as ds: - with credential({ - "name": "S3", - "provider": "S3", - "attributes": { - "access_key_id": "test", - "secret_access_key": "test", - **credential_attributes, - }, - }) as c: - with task({ - "direction": "PUSH", - "transfer_mode": "COPY", - "path": f"/mnt/{ds}", - "credentials": c["id"], - "attributes": { - "bucket": "bucket", - "folder": "", - }, - }) as t: - with mock_rclone() as mr: - call("cloudsync.sync", t["id"]) - - time.sleep(2.5) - - assert 
mr.result["config"]["remote"]["region"] == "fr-par" diff --git a/tests/api2/test_cloud_sync_script.py b/tests/api2/test_cloud_sync_script.py deleted file mode 100644 index 5cd789e5b759e..0000000000000 --- a/tests/api2/test_cloud_sync_script.py +++ /dev/null @@ -1,63 +0,0 @@ -import pytest - -from truenas_api_client import ClientException -from middlewared.test.integration.assets.cloud_sync import local_ftp_task, run_task -from middlewared.test.integration.utils import call, ssh - - -def test_pre_script_failure(): - with local_ftp_task({ - "pre_script": "echo Custom error\nexit 123", - }) as task: - with pytest.raises(ClientException) as ve: - run_task(task) - - assert ve.value.error == "[EFAULT] Pre-script failed with exit code 123" - - job = call("core.get_jobs", [["method", "=", "cloudsync.sync"]], {"order_by": ["-id"], "get": True}) - assert job["logs_excerpt"] == "[Pre-script] Custom error\n" - - -def test_pre_script_ok(): - ssh("rm /tmp/cloud_sync_test", check=False) - with local_ftp_task({ - "pre_script": "touch /tmp/cloud_sync_test", - }) as task: - run_task(task) - - ssh("cat /tmp/cloud_sync_test") - - -def test_post_script_not_running_after_failure(): - ssh("touch /tmp/cloud_sync_test") - with local_ftp_task({ - "post_script": "rm /tmp/cloud_sync_test", - }) as task: - call("service.stop", "ftp") - - with pytest.raises(ClientException) as ve: - run_task(task) - - assert "connection refused" in ve.value.error - - ssh("cat /tmp/cloud_sync_test") - - -def test_post_script_ok(): - ssh("rm /tmp/cloud_sync_test", check=False) - with local_ftp_task({ - "post_script": "touch /tmp/cloud_sync_test", - }) as task: - run_task(task) - - ssh("cat /tmp/cloud_sync_test") - - -def test_script_shebang(): - with local_ftp_task({ - "post_script": "#!/usr/bin/env python3\nprint('Test' * 2)", - }) as task: - run_task(task) - - job = call("core.get_jobs", [["method", "=", "cloudsync.sync"]], {"order_by": ["-id"], "get": True}) - assert job["logs_excerpt"].endswith("[Post-script] TestTest\n") diff --git a/tests/api2/test_cloud_sync_storj.py b/tests/api2/test_cloud_sync_storj.py deleted file mode 100644 index ae4fac02158c0..0000000000000 --- a/tests/api2/test_cloud_sync_storj.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import sys - -import pytest - -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.assets.cloud_sync import credential, task, run_task -from middlewared.test.integration.assets.pool import dataset - -apifolder = os.getcwd() -sys.path.append(apifolder) - -pytestmark = pytest.mark.skip(reason='See IT ticket IT-9829') -try: - from config import ( - STORJ_IX_AWS_ACCESS_KEY_ID, - STORJ_IX_AWS_SECRET_ACCESS_KEY, - STORJ_IX_BUCKET, - ) -except ImportError: - pytestmark = pytest.mark.skip(reason='Storj credential are missing in config.py') - STORJ_IX_AWS_ACCESS_KEY_ID = None - STORJ_IX_AWS_SECRET_ACCESS_KEY = None - STORJ_IX_BUCKET = None - -CREDENTIAL = { - "provider": "STORJ_IX", - "attributes": { - "access_key_id": STORJ_IX_AWS_ACCESS_KEY_ID, - "secret_access_key": STORJ_IX_AWS_SECRET_ACCESS_KEY, - } -} -TASK_ATTRIBUTES = { - "bucket": STORJ_IX_BUCKET, - "folder": "", -} - - -def test_storj_verify(): - result = call("cloudsync.credentials.verify", { - "provider": "STORJ_IX", - "attributes": { - "access_key_id": STORJ_IX_AWS_ACCESS_KEY_ID, - "secret_access_key": STORJ_IX_AWS_SECRET_ACCESS_KEY, - } - }) - - assert result["valid"], result - - -@pytest.fixture(scope="module") -def storj_credential(): - with credential(CREDENTIAL) as c: - yield c - - -def 
test_storj_list_buckets(storj_credential): - assert any(item["Name"] == STORJ_IX_BUCKET for item in call("cloudsync.list_buckets", storj_credential["id"])) - - -def test_storj_list_directory(storj_credential): - result = call("cloudsync.list_directory", { - "credentials": storj_credential["id"], - "attributes": TASK_ATTRIBUTES, - }) - assert len(result) == 1 - assert result[0]["Name"] == "a" - - -def test_storj_sync(storj_credential): - with dataset("test_storj_sync") as ds: - with task({ - "direction": "PULL", - "transfer_mode": "COPY", - "path": f"/mnt/{ds}", - "credentials": storj_credential["id"], - "attributes": TASK_ATTRIBUTES, - }) as t: - run_task(t) - - assert ssh(f"ls /mnt/{ds}") == "a\n" diff --git a/tests/api2/test_config_upload.py b/tests/api2/test_config_upload.py deleted file mode 100644 index 09c7b7e3c6be3..0000000000000 --- a/tests/api2/test_config_upload.py +++ /dev/null @@ -1,60 +0,0 @@ -import contextlib -import io -import json -import sqlite3 -import tarfile -import os - -import pytest - -from truenas_api_client import ClientException -from middlewared.test.integration.utils import call, session, url - - -@contextlib.contextmanager -def db_ops(db_name): - try: - with contextlib.closing(sqlite3.connect(db_name)) as conn: - with conn: - conn.execute("CREATE TABLE alembic_version (version_num VARCHAR(32) NOT NULL);") - conn.execute("INSERT INTO alembic_version VALUES ('invalid')") - yield - finally: - os.unlink(db_name) - - -@contextlib.contextmanager -def tar_ops(file_to_add): - tar_name = "config.tar" - tfile = None - try: - with tarfile.open(tar_name, "w") as tfile: - tfile.add(file_to_add) - yield tfile.name - finally: - if tfile is not None: - os.unlink(tfile.name) - - -def test_invalid_database_file(): - db_name = "freenas-v1.db" - with db_ops(db_name): - with tar_ops(db_name) as tar_name: - with session() as s: - r = s.post( - f"{url()}/_upload", - files={ - "data": (None, io.StringIO(json.dumps({ - "method": "config.upload", - "params": [], - }))), - "file": (None, open(tar_name, "rb")), - }, - ) - r.raise_for_status() - job_id = r.json()["job_id"] - with pytest.raises(ClientException) as ve: - call("core.job_wait", job_id, job=True) - - assert 'Uploaded TrueNAS database file is not valid' in ve.value.error - assert "Can't locate revision identified by 'invalid'" in ve.value.error diff --git a/tests/api2/test_core_bulk.py b/tests/api2/test_core_bulk.py deleted file mode 100644 index bed017ffd96f0..0000000000000 --- a/tests/api2/test_core_bulk.py +++ /dev/null @@ -1,97 +0,0 @@ -from unittest.mock import ANY - -import pytest - -from truenas_api_client import ClientException -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.utils import call, mock -from middlewared.test.integration.utils.audit import expect_audit_log - - -def test_core_bulk_reports_job_id(): - with mock("test.test1", """\ - from middlewared.service import job, CallError - - @job() - def mock(self, job, *args): - if args[0] == 0: - raise CallError("Error") - else: - return args[0] - """): - result = call("core.bulk", "test.test1", [[0], [10]], job=True) - - assert result == [ - {"job_id": ANY, "result": None, "error": "[EFAULT] Error"}, - {"job_id": ANY, "result": 10, "error": None}, - ] - - job_0 = call("core.get_jobs", [["id", "=", result[0]["job_id"]]], {"get": True}) - assert job_0["arguments"] == [0] - job_1 = call("core.get_jobs", [["id", "=", result[1]["job_id"]]], {"get": True}) - assert job_1["arguments"] == [10] - - -def 
test_authorized(): - with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c: - with mock("test.test1", """ - from middlewared.service import pass_app - - @pass_app() - async def mock(self, app): - return app.authenticated_credentials.dump()["username"].startswith("unprivileged") - """): - assert c.call("core.bulk", "test.test1", [[]], job=True) == [{"result": True, "error": None}] - - -def test_authorized_audit(): - with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c: - with mock("test.test1", """ - from middlewared.schema import Int - from middlewared.service import accepts - - @accepts(Int("param"), audit="Mock", audit_extended=lambda param: str(param)) - async def mock(self, param): - return - """): - with expect_audit_log([ - { - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": True, - "method": "test.test1", - "params": [42], - "description": "Mock 42", - }, - "success": True, - } - ]): - c.call("core.bulk", "test.test1", [[42]], job=True) - - -def test_not_authorized(): - with unprivileged_user_client(allowlist=[]) as c: - with pytest.raises(ClientException) as ve: - c.call("core.bulk", "test.test1", [[]], job=True) - - assert ve.value.error == "[EPERM] Not authorized" - - -def test_not_authorized_audit(): - with unprivileged_user_client() as c: - with expect_audit_log([ - { - "event": "METHOD_CALL", - "event_data": { - "authenticated": True, - "authorized": False, - "method": "user.create", - "params": [{"username": "sergey", "full_name": "Sergey"}], - "description": "Create user sergey", - }, - "success": False, - } - ]): - with pytest.raises(ClientException): - c.call("core.bulk", "user.create", [[{"username": "sergey", "full_name": "Sergey"}]], job=True) diff --git a/tests/api2/test_crud.py b/tests/api2/test_crud.py deleted file mode 100644 index 8d2dc75b0cc63..0000000000000 --- a/tests/api2/test_crud.py +++ /dev/null @@ -1,29 +0,0 @@ -import contextlib -import pytest - -from middlewared.test.integration.assets.privilege import privilege -from middlewared.test.integration.utils import client - - -@pytest.mark.parametrize('offset,limit', [ - (0, 4), - (1, 4), - (2, 4), - (3, 4), - (2, 5), - (3, 5), -]) -def test_query_filters(offset, limit): - with contextlib.ExitStack() as stack: - for i in range(5): - stack.enter_context( - privilege({ - 'name': f'Test Privilege {i}', - 'web_shell': False - }) - ) - with client() as c: - query_results = c.call('privilege.query', [], {'select': ['id']}) - expected_result = query_results[offset:offset + limit] - actual_result = c.call('privilege.query', [], {'offset': offset, 'limit': limit, 'select': ['id']}) - assert actual_result == expected_result diff --git a/tests/api2/test_crud_events.py b/tests/api2/test_crud_events.py deleted file mode 100644 index a8f0868c562bf..0000000000000 --- a/tests/api2/test_crud_events.py +++ /dev/null @@ -1,138 +0,0 @@ -import contextlib -import threading -import typing - -from middlewared.test.integration.assets.crypto import get_cert_params, root_certificate_authority -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.client import client - - -def event_thread(event_endpoint: str, context: dict): - with client(py_exceptions=False) as c: - def cb(mtype, **message): - if len(message) != 3 or not all( - k in message for k in ('id', 'msg', 'collection') - ) or message['collection'] != event_endpoint or message['msg'] not in ( - 'added', 'changed', 'removed' - ): 
- return - - if context['result'] is None: - context['result'] = message - - context['received_result'].set() - context['shutdown_thread'].set() - - c.subscribe(event_endpoint, cb) - context['subscribed'].set() - context['shutdown_thread'].wait(context['timeout']) - - -@contextlib.contextmanager -def wait_for_event(event_endpoint: str, timeout=60): - context = { - 'subscribed': threading.Event(), - 'result': None, - 'received_result': threading.Event(), - 'shutdown_thread': threading.Event(), - 'timeout': timeout, - } - thread = threading.Thread(target=event_thread, args=(event_endpoint, context), daemon=True) - thread.start() - if not context['subscribed'].wait(30): - raise Exception('Timed out waiting for client to subscribe') - - try: - yield context - if not context['received_result'].wait(timeout): - raise Exception('Event not received') - finally: - context['shutdown_thread'].set() - thread.join(timeout=5) - - -def assert_result(context: dict, event_endpoint: str, oid: typing.Union[int, str], event_type: str) -> None: - assert context['result'] == { - 'msg': event_type, - 'collection': event_endpoint, - 'id': oid, - } - - -def test_event_create_on_non_job_method(): - with wait_for_event('certificateauthority.query') as context: - with root_certificate_authority('root_ca_create_event_test') as root_ca: - assert root_ca['CA_type_internal'] is True, root_ca - - assert_result(context, 'certificateauthority.query', root_ca['id'], 'added') - - -def test_event_create_on_job_method(): - with root_certificate_authority('root_ca_create_event_test') as root_ca: - with wait_for_event('certificate.query') as context: - cert = call('certificate.create', { - 'name': 'cert_test', - 'signedby': root_ca['id'], - 'create_type': 'CERTIFICATE_CREATE_INTERNAL', - **get_cert_params(), - }, job=True) - try: - assert cert['cert_type_internal'] is True, cert - finally: - call('certificate.delete', cert['id'], job=True) - - assert_result(context, 'certificate.query', cert['id'], 'added') - - -def test_event_update_on_non_job_method(): - with root_certificate_authority('root_ca_update_event_test') as root_ca: - assert root_ca['CA_type_internal'] is True, root_ca - - with wait_for_event('certificateauthority.query') as context: - call('certificateauthority.update', root_ca['id'], {}) - - assert_result(context, 'certificateauthority.query', root_ca['id'], 'changed') - - -def test_event_update_on_job_method(): - with wait_for_event('tunable.query'): - tunable = call('tunable.create', { - 'type': 'SYSCTL', - 'var': 'kernel.watchdog', - 'value': '1', - }, job=True) - try: - with wait_for_event('tunable.query') as context: - call('tunable.update', tunable['id'], {'value': '0'}, job=True) - - assert_result(context, 'tunable.query', tunable['id'], 'changed') - finally: - call('tunable.delete', tunable['id'], job=True) - - -def test_event_delete_on_non_job_method(): - root_ca = call('certificateauthority.create', { - **get_cert_params(), - 'name': 'test_root_ca_delete_event', - 'create_type': 'CA_CREATE_INTERNAL', - }) - assert root_ca['CA_type_internal'] is True, root_ca - - with wait_for_event('certificateauthority.query') as context: - call('certificateauthority.delete', root_ca['id']) - - assert_result(context, 'certificateauthority.query', root_ca['id'], 'removed') - - -def test_event_delete_on_job_method(): - with wait_for_event('tunable.query'): - tunable = call('tunable.create', { - 'type': 'SYSCTL', - 'var': 'kernel.watchdog', - 'value': '1', - }, job=True) - - with wait_for_event('tunable.query') as context: - 
call('tunable.delete', tunable['id'], job=True) - - assert_result(context, 'tunable.query', tunable['id'], 'removed') diff --git a/tests/api2/test_dataset_encryption_keys_in_replication.py b/tests/api2/test_dataset_encryption_keys_in_replication.py deleted file mode 100644 index cbeb12bb732ac..0000000000000 --- a/tests/api2/test_dataset_encryption_keys_in_replication.py +++ /dev/null @@ -1,151 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.replication import replication_task -from middlewared.test.integration.utils import call - - -BASE_REPLICATION = { - 'direction': 'PUSH', - 'transport': 'LOCAL', - 'source_datasets': [], - 'target_dataset': None, - 'recursive': False, - 'auto': False, - 'retention_policy': 'NONE', -} - - -def encryption_props(): - return { - 'encryption_options': {'generate_key': True}, - 'encryption': True, - 'inherit_encryption': False - } - - -def make_assertions(source_datasets, task_id, target_dataset, unlocked_datasets): - for source_ds in source_datasets: - call('zfs.snapshot.create', {'dataset': source_ds, 'name': 'snaptest-1', 'recursive': True}) - - call('replication.run', task_id, job=True) - keys = call('pool.dataset.export_keys_for_replication_internal', task_id) - unlocked_info = call( - 'pool.dataset.unlock', target_dataset.split('/', 1)[0], { - 'datasets': [{'name': name, 'key': key} for name, key in keys.items()], - 'recursive': True, - }, job=True - ) - assert set(unlocked_info['unlocked']) == set(unlocked_datasets), unlocked_info - - -def test_single_source_replication(): - with dataset('source_test', encryption_props()) as src: - with dataset('parent_destination', encryption_props()) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [src], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - }) as task: - make_assertions([src], task['id'], dst, [dst]) - - -def test_single_source_recursive_replication(): - with dataset('source_test', encryption_props()) as src: - with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src: - with dataset('parent_destination', encryption_props()) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [src], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - 'recursive': True, - }) as task: - make_assertions([src], task['id'], dst, [dst, f'{dst}/{child_src.rsplit("/", 1)[-1]}']) - - -def test_single_source_child_encrypted_replication(): - with dataset('source_test', encryption_props()) as src: - with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src: - with dataset('parent_destination', encryption_props()) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [child_src], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - 'recursive': True, - }) as task: - make_assertions([child_src], task['id'], dst, [dst]) - - -def test_multiple_source_replication(): - with dataset('source_test1', encryption_props()) as src1: - with dataset('source_test2', encryption_props()) as src2: - with 
dataset('parent_destination', encryption_props()) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [src1, src2], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - }) as task: - make_assertions( - [src1, src2], task['id'], dst, [f'{dst}/{k.rsplit("/", 1)[-1]}' for k in [src1, src2]] - ) - - -def test_multiple_source_recursive_replication(): - with dataset('source_test1', encryption_props()) as src1: - with dataset(f'{src1.rsplit("/", 1)[-1]}/child_source_test1', encryption_props()) as child_src1: - with dataset('source_test2', encryption_props()) as src2: - with dataset(f'{src2.rsplit("/", 1)[-1]}/child_source_test2', encryption_props()) as child_src2: - with dataset('parent_destination', encryption_props()) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [src1, src2], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - 'recursive': True, - }) as task: - make_assertions( - [src1, src2], task['id'], dst, [ - f'{dst}/{"/".join(k.rsplit("/")[-abs(n):])}' for k, n in [ - (src1, 1), (src2, 1), (child_src1, 2), (child_src2, 2), - ] - ] - ) - - -@pytest.mark.parametrize('keys_available_for_download', [False, True]) -def test_replication_task_reports_keys_available_for_download(keys_available_for_download): - with dataset('source_test', encryption_props() if keys_available_for_download else {}) as src: - with dataset('parent_destination', encryption_props() if keys_available_for_download else {}) as parent_ds: - with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst: - with replication_task({ - **BASE_REPLICATION, - 'name': 'encryption_replication_test', - 'source_datasets': [src], - 'target_dataset': dst, - 'name_regex': '.+', - 'auto': False, - }) as task: - task = call( - 'replication.get_instance', task['id'], {'extra': {'check_dataset_encryption_keys': True}} - ) - assert task['has_encrypted_dataset_keys'] is keys_available_for_download, task - diff --git a/tests/api2/test_dataset_mount.py b/tests/api2/test_dataset_mount.py deleted file mode 100644 index 750e0b2416792..0000000000000 --- a/tests/api2/test_dataset_mount.py +++ /dev/null @@ -1,19 +0,0 @@ -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh - - -def test_dataset_mount_on_readonly_dataset(): - src_parent_dataset_name = 'parent_src' - with dataset(src_parent_dataset_name) as parent_src: - with dataset(f'{src_parent_dataset_name}/child1', {'readonly': 'ON'}) as child1_ds: - with dataset(f'{src_parent_dataset_name}/child2', {'readonly': 'ON'}) as child2_ds: - call('zfs.dataset.create', {'name': f'{child1_ds}/failed'}) - call('zfs.dataset.umount', parent_src, {'force': True}) - call('zfs.dataset.mount', parent_src, {'recursive': True}) - for source_dataset, mounted in ( - (parent_src, 'yes'), - (child1_ds, 'yes'), - (f'{child1_ds}/failed', 'no'), - (child2_ds, 'yes'), - ): - assert call('zfs.dataset.get_instance', source_dataset)['properties']['mounted']['value'] == mounted diff --git a/tests/api2/test_dataset_unlock_validation.py b/tests/api2/test_dataset_unlock_validation.py deleted file mode 100644 index 23088a1db0d92..0000000000000 --- a/tests/api2/test_dataset_unlock_validation.py +++ /dev/null @@ -1,50 +0,0 @@ -import os 
-import pytest - -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from truenas_api_client import ValidationErrors - - -PASSPHRASE = '12345678' - - -def encryption_props(): - return { - 'encryption_options': {'generate_key': False, 'passphrase': PASSPHRASE}, - 'encryption': True, - 'inherit_encryption': False - } - - -@pytest.mark.parametrize( - 'nested_dir,lock_dataset', [('test_dir', True), ('parent/child', True), ('test_dir', False)] -) -def test_encrypted_dataset_unlock_mount_validation(nested_dir, lock_dataset): - with dataset('test_dataset', encryption_props()) as encrypted_ds: - mount_point = os.path.join('/mnt', encrypted_ds) - - if lock_dataset: - call('pool.dataset.lock', encrypted_ds, job=True) - call('filesystem.set_immutable', False, mount_point) - - ssh(f'mkdir -p {os.path.join(mount_point, nested_dir)}') - - if lock_dataset: - with pytest.raises(ValidationErrors) as ve: - call( - 'pool.dataset.unlock', encrypted_ds.split('/')[0], - {'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True - ) - - assert ve.value.errors[0].attribute == 'unlock_options.datasets.0.force' - assert ve.value.errors[0].errmsg == f'\'{mount_point}\' directory is not empty (please provide' \ - ' "force" flag to override this error and file/directory will be' \ - ' renamed once the dataset is unlocked)' - else: - call( - 'pool.dataset.unlock', encrypted_ds.split('/')[0], - {'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True - ) - - ssh(f'rm -rf {mount_point}') diff --git a/tests/api2/test_device_get_disk_names.py b/tests/api2/test_device_get_disk_names.py deleted file mode 100644 index 62437a8089076..0000000000000 --- a/tests/api2/test_device_get_disk_names.py +++ /dev/null @@ -1,5 +0,0 @@ -from middlewared.test.integration.utils import call - - -def test_device_get_disk_names(): - assert set(list(call('device.get_disks', False, True))) == set(call('device.get_disk_names')) diff --git a/tests/api2/test_device_get_disks_size.py b/tests/api2/test_device_get_disks_size.py deleted file mode 100644 index 47df48d8f581e..0000000000000 --- a/tests/api2/test_device_get_disks_size.py +++ /dev/null @@ -1,7 +0,0 @@ -from middlewared.test.integration.utils import call, ssh - - -def test_device_get_disks_size(): - boot_disk = call('boot.get_disks')[0] - fdisk_size = int(ssh(f'fdisk -s /dev/{boot_disk}').strip()) * 1024 - assert call('device.get_disks')[boot_disk]['size'] == fdisk_size diff --git a/tests/api2/test_disk_format.py b/tests/api2/test_disk_format.py deleted file mode 100644 index a3f367102b8e0..0000000000000 --- a/tests/api2/test_disk_format.py +++ /dev/null @@ -1,97 +0,0 @@ -import json -import time - -from middlewared.test.integration.utils import call, ssh - -""" -We use 'parted' to partition disks. -Verification is based on 'parted' documentation (https://people.redhat.com/msnitzer/docs/io-limits.txt): - The heuristic parted uses is: - 1) Always use the reported 'alignment_offset' as the offset for the - start of the first primary partition. - 2a) If 'optimal_io_size' is defined (not 0) align all partitions on an - 'optimal_io_size' boundary. - 2b) If 'optimal_io_size' is undefined (0) and 'alignment_offset' is 0 - and 'minimum_io_size' is a power of 2: use a 1MB default alignment. 
- - as you can see this is the catch all for "legacy" devices which - don't appear to provide "I/O hints"; so in the default case all - partitions will align on a 1MB boundary. - - NOTE: we can't distinguish between a "legacy" device and modern - device that provides "I/O hints" with alignment_offset=0 and - optimal_io_size=0. Such a device might be a single SAS 4K device. - So worst case we lose < 1MB of space at the start of the disk. -""" -# Some 'constants' -MBR_SECTOR_GAP = 34 -ONE_MB = 1048576 -DATA_TYPE_UUID = "6a898cc3-1dd2-11b2-99a6-080020736631" - - -def get_parted_info(disk_path): - # By the time this is called, the disk has been formatted - # but the kernel might not have been made fully aware of the changes - # so let's retry a bit before failing - for i in range(10): - pbytes = json.loads(ssh(f'parted {disk_path} unit b p --json'))['disk'] - if pbytes.get('partitions') is None: - time.sleep(1) - else: - break - else: - assert False, f'parted tool failed to find partitions (in bytes) on {disk_path!r} ({pbytes!r})' - - for i in range(10): - psectors = json.loads(ssh(f'parted {disk_path} unit s p --json'))['disk'] - if psectors.get('partitions') is None: - time.sleep(1) - else: - break - else: - assert False, f'parted tool failed to find partitions (in sectors) on {disk_path!r} ({psectors!r})' - - return pbytes, psectors - - -def test_disk_format_and_wipe(): - """Generate a single data partition""" - # get an unused disk and format it - unused = call('disk.get_unused') - assert unused, 'Need at least 1 unused disk' - call('disk.format', unused[0]['name']) - partitions = call('disk.list_partitions', unused[0]['name']) - assert partitions, partitions - - # The first and only partition should be data - assert len(partitions) == 1, partitions - partition = partitions[0] - assert partition['partition_type'] == DATA_TYPE_UUID - - # we used libparted to format a drive so let's - # validate our API matches parted output (NOTE: - # we check both bytes and sectors) - parted_bytes, parted_sectors = get_parted_info(f'/dev/{unused[0]["name"]}') - - # sanity check (make sure parted shows same number of partitions) - assert len(parted_bytes['partitions']) == len(partitions), parted_bytes['partitions'] - assert len(parted_sectors['partitions']) == len(partitions), parted_sectors['partitions'] - - # validate our API shows proper start/end sizes in bytes - pbyte = parted_bytes['partitions'][0] - assert int(pbyte['size'].split('B')[0]) == partition['size'] - assert int(pbyte['start'].split('B')[0]) == partition['start'] - assert int(pbyte['end'].split('B')[0]) == partition['end'] - - # validate our API shows proper start/end sizes in sectors - psect = parted_sectors['partitions'][0] - assert int(psect['start'].split('s')[0]) == partition['start_sector'] - assert int(psect['end'].split('s')[0]) == partition['end_sector'] - - # verify that wiping the disk removes partition labels - call('disk.wipe', partition['disk'], 'QUICK', job=True) - # the partitions are removed - new_parts = call('disk.list_partitions', partition['disk']) - assert len(new_parts) == 0, new_parts - - # sanity check, make sure parted doesn't see partitions either - pbytes = json.loads(ssh(f'parted /dev/{unused[0]["name"]} unit b p --json'))['disk'] - assert pbytes.get('partitions') is None, repr(pbytes) diff --git a/tests/api2/test_disk_get_dev_size.py b/tests/api2/test_disk_get_dev_size.py deleted file mode 100644 index 086981d849687..0000000000000 --- a/tests/api2/test_disk_get_dev_size.py +++ /dev/null @@ -1,15 +0,0 @@ -import
json - -import pytest - -from middlewared.test.integration.utils import call, ssh - - -@pytest.fixture(scope="session") -def blockdevices(): - return {i['name']: i for i in json.loads(ssh('lsblk -bJ -o NAME,SIZE'))['blockdevices']} - - -def test_get_dev_size_for_all_disks(blockdevices): - for disk, disk_info in blockdevices.items(): - assert disk_info['size'] == call('disk.get_dev_size', disk) diff --git a/tests/api2/test_disk_temperature.py b/tests/api2/test_disk_temperature.py deleted file mode 100644 index ded544bccffe6..0000000000000 --- a/tests/api2/test_disk_temperature.py +++ /dev/null @@ -1,120 +0,0 @@ -import time -from unittest.mock import ANY - -import pytest - -from middlewared.test.integration.utils import call, mock - - - -@pytest.fixture(autouse=True, scope="function") -def reset_temperature_cache(): - call("disk.reset_temperature_cache") - - -def test_disk_temperature(): - with mock("disk.temperature_uncached", return_value=50): - assert call("disk.temperature", "sda") == 50 - - -def test_disk_temperature_cache(): - with mock("disk.temperature_uncached", return_value=50): - call("disk.temperature", "sda") - - with mock("disk.temperature_uncached", exception=True): - assert call("disk.temperature", "sda", {"cache": 300}) == 50 - - -def test_disk_temperature_cache_expires(): - with mock("disk.temperature_uncached", return_value=50): - call("disk.temperature", "sda") - - time.sleep(3) - - with mock("disk.temperature_uncached", return_value=60): - assert call("disk.temperature", "sda", {"cache": 2}) == 60 - - -def test_disk_temperatures_only_cached(): - with mock("disk.temperature_uncached", return_value=50): - call("disk.temperature", "sda") - - with mock("disk.temperature_uncached", exception=True): - assert call("disk.temperatures", ["sda"], {"only_cached": True}) == {"sda": 50} - - -def test_disk_temperature_alerts(): - sda_temperature_alert = { - "uuid": "a11a16a9-a28b-4005-b11a-bce6af008d86", - "source": "", - "klass": "SMART", - "args": { - "device": "/dev/sda", - "message": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)" - }, - "node": "Controller A", - "key": "{\"device\": \"/dev/sda\", \"message\": \"Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}", - "datetime": { - "$date": 1657098825510 - }, - "last_occurrence": { - "$date": 1657185226656 - }, - "dismissed": False, - "mail": None, - "text": "%(message)s.", - "id": "a11a16a9-a28b-4005-b11a-bce6af008d86", - "level": "CRITICAL", - "formatted": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).", - "one_shot": True, - } - sdb_temperature_alert = { - "uuid": "66e29e1c-2948-4473-928a-3ccf0c0aefa9", - "source": "", - "klass": "SMART", - "args": { - "device": "/dev/sdb", - "message": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)" - }, - "node": "Controller A", - "key": "{\"device\": \"/dev/sdb\", \"message\": \"Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}", - "datetime": { - "$date": 1657098825510 - }, - "last_occurrence": { - "$date": 1657185226656 - }, - "dismissed": False, - "mail": None, - "text": "%(message)s.", - "id": "a11a16a9-a28b-4005-b11a-bce6af008d86", - "level": "CRITICAL", - "formatted": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).", - "one_shot": True, - } - unrelated_alert = { - "uuid": 
"c371834a-5168-474d-a6d0-9eac02ad29a7", - "source": "", - "klass": "ScrubStarted", - "args": "temp", - "node": "Controller A", - "key": "\"temp\"", - "datetime": { - "$date": 1657713495028 - }, - "last_occurrence": { - "$date": 1657713495028 - }, - "dismissed": False, - "mail": None, - "text": "Scrub of pool %r started.", - "id": "c371834a-5168-474d-a6d0-9eac02ad29a7", - "level": "INFO", - "formatted": "Scrub of pool 'temp' started.", - "one_shot": True, - } - - with mock("alert.list", return_value=[sda_temperature_alert, sdb_temperature_alert, unrelated_alert]): - assert call("disk.temperature_alerts", ["sda"]) == [dict(sda_temperature_alert, - datetime=ANY, - last_occurrence=ANY)] diff --git a/tests/api2/test_disk_wipe.py b/tests/api2/test_disk_wipe.py deleted file mode 100644 index 24be9938bd411..0000000000000 --- a/tests/api2/test_disk_wipe.py +++ /dev/null @@ -1,110 +0,0 @@ -import time - -import pytest - -from auto_config import ha -from middlewared.test.integration.utils import call, ssh - - -def test_disk_wipe_partition_clean(): - """Confirm we clean up around the middle partitions""" - signal_msg = "ix private data" - disk = call("disk.get_unused")[0]["name"] - - # Create a data partition - call('disk.format', disk) - parts = call('disk.list_partitions', disk) - seek_blk = parts[0]['start_sector'] - blk_size = parts[0]['start'] // parts[0]['start_sector'] - - # Write some private data into the start of the data partition - ssh( - f"echo '{signal_msg}' > junk;" - f"dd if=junk bs={blk_size} count=1 oseek={seek_blk} of=/dev/{disk};" - "rm -f junk" - ) - - # Confirm presence - readback_presence = ssh(f"dd if=/dev/{disk} bs={blk_size} iseek={seek_blk} count=1").splitlines()[0] - assert signal_msg in readback_presence - - # Clean the drive - call('disk.wipe', disk, 'QUICK', job=True) - - # Confirm it's now clean - readback_clean = ssh(f"dd if=/dev/{disk} bs={blk_size} iseek={seek_blk} count=1").splitlines()[0] - assert signal_msg not in readback_clean - - # Confirm we have no partitions from middleware - partitions = call('disk.list_partitions', disk) - assert len(partitions) == 0 - - # Confirm the kernel partition tables indicate no partitions - proc_partitions = str(ssh('cat /proc/partitions')) - # If the wipe is truly successful /proc/partitions should have a singular - # entry for 'disk' in the table - assert len([line for line in proc_partitions.splitlines() if disk in line]) == 1 - - -@pytest.mark.parametrize('dev_name', ['BOOT', 'UNUSED', 'bogus', '']) -def test_disk_get_partitions_quick(dev_name): - """ - dev_name: - 'BOOT' - find a proper device that has partitions - 'UNUSED' - find a proper device that does not have partitons - All others are failure tests. All failures are properly handled - and should return an empty dictionary - """ - has_partitions = False - if 'BOOT' == dev_name: - dev_name = call('boot.get_disks')[0] - has_partitions = True - elif 'UNUSED' == dev_name: - # NOTE: 'unused' disks typically have no partitions - dev_name = call('disk.get_unused')[0]['name'] - - parts = call('disk.get_partitions_quick', dev_name) - assert has_partitions == (len(parts) > 0) - - -def test_disk_wipe_abort(): - """Test that we can sucessfully abort a disk.wipe job""" - expected_pids = set() - if ha: - # In HA systems fenced may be using the disk. Obtain the PID - # so that we can ignore it. 
- fenced_info = call('failover.fenced.run_info') - if fenced_info['running']: - expected_pids.add(str(fenced_info['pid'])) - - # Obtain a disk to wipe - disk = call("disk.get_unused")[0]["name"] - - job_id = call("disk.wipe", disk, "FULL") - - # Wait for wipe process to actually start - for i in range(20): - job = call("core.get_jobs", [["id", "=", job_id]], {"get": True}) - if job["progress"]["percent"] > 0: - break - - time.sleep(0.1) - else: - assert False, job - - call("core.job_abort", job_id) - - for i in range(20): - result = set(ssh(f"fuser /dev/{disk}", check=False).strip().split()) - # Check that only the expected PIDs are using the disk - # (which means that the abort was completed successfully) - if result == expected_pids: - # Ensure that the job was aborted before completion - job = call("core.get_jobs", [["id", "=", job_id]], {"get": True}) - assert job["state"] == "ABORTED" - assert job["progress"]["percent"] < 95 - break - - time.sleep(0.1) - else: - assert False, result diff --git a/tests/api2/test_disk_zfs_guid.py b/tests/api2/test_disk_zfs_guid.py deleted file mode 100644 index 0a0f76eb650a5..0000000000000 --- a/tests/api2/test_disk_zfs_guid.py +++ /dev/null @@ -1,66 +0,0 @@ -from datetime import datetime - -from middlewared.test.integration.utils import call -from middlewared.test.integration.utils.mock import mock -from middlewared.test.integration.utils.mock_db import mock_table_contents - -DISK_TEMPLATE = { - "disk_subsystem": "scsi", - "disk_number": 2160, - "disk_serial": "", - "disk_lunid": None, - "disk_size": "17179869184", - "disk_description": "", - "disk_transfermode": "Auto", - "disk_hddstandby": "Always On", - "disk_advpowermgmt": "Disabled", - "disk_togglesmart": True, - "disk_smartoptions": "", - "disk_expiretime": None, - "disk_enclosure_slot": None, - "disk_passwd": "", - "disk_critical": None, - "disk_difference": None, - "disk_informational": None, - "disk_model": "VBOX_HARDDISK", - "disk_rotationrate": None, - "disk_type": "HDD", - "disk_kmip_uid": None, - "disk_zfs_guid": None, - "disk_bus": "ATA" -} - - -def test_does_not_set_zfs_guid_for_expired_disk(): - with mock_table_contents( - "storage.disk", - [ - {**DISK_TEMPLATE, "disk_identifier": "{serial}1", "disk_name": "sda", "disk_expiretime": datetime.utcnow()}, - {**DISK_TEMPLATE, "disk_identifier": "{serial}2", "disk_name": "sda"}, - ], - ): - with mock("pool.flatten_topology", return_value=[ - {"type": "DISK", "disk": "sda", "guid": "guid1"}, - ]): - call("disk.sync_zfs_guid", { - "topology": "MOCK", - }) - - assert call( - "datastore.query", "storage.disk", [["disk_identifier", "=", "{serial}1"]], {"get": True}, - )["disk_zfs_guid"] is None - assert call( - "datastore.query", "storage.disk", [["disk_identifier", "=", "{serial}2"]], {"get": True}, - )["disk_zfs_guid"] == "guid1" - - -def test_does_not_return_expired_disks_with_same_guid(): - with mock_table_contents( - "storage.disk", - [ - {**DISK_TEMPLATE, "disk_identifier": "{serial}1", "disk_name": "sda", "disk_expiretime": datetime.utcnow(), - "disk_zfs_guid": "guid1"}, - {**DISK_TEMPLATE, "disk_identifier": "{serial}2", "disk_name": "sda", "disk_zfs_guid": "guid1"}, - ] - ): - assert call("disk.disk_by_zfs_guid", "guid1")["identifier"] == "{serial}2" diff --git a/tests/api2/test_draid.py b/tests/api2/test_draid.py deleted file mode 100644 index e8c217427da6f..0000000000000 --- a/tests/api2/test_draid.py +++ /dev/null @@ -1,97 +0,0 @@ -import pytest - -from truenas_api_client import ValidationErrors -from 
middlewared.test.integration.assets.pool import another_pool -from middlewared.test.integration.utils import call - - POOL_NAME = 'test_draid_pool' - - -@pytest.mark.parametrize( - 'n_data,n_spare,n_parity', [ - (1, 0, 1), - (1, 1, 1), - (1, 0, 2), - (1, 1, 2), - (2, 2, 2), - (1, 1, 3), - ] -) -def test_valid_draid_pool_creation(n_data, n_spare, n_parity): - unused_disks = call('disk.get_unused') - if len(unused_disks) < 5: - pytest.skip('Insufficient number of disks to perform these tests') - - children = n_data + n_parity + n_spare - with another_pool({ - 'name': POOL_NAME, - 'topology': { - 'data': [{ - 'disks': [disk['name'] for disk in unused_disks[:children]], - 'type': f'DRAID{n_parity}', - 'draid_data_disks': n_data, - 'draid_spare_disks': n_spare - }], - }, - 'allow_duplicate_serials': True, - }) as draid: - assert draid['topology']['data'][0]['name'] == f'draid{n_parity}:{n_data}d:{children}c:{n_spare}s-0' - unused_disk_for_update = call('disk.get_unused') - if len(unused_disk_for_update) >= children: - draid_pool_updated = call( - 'pool.update', draid['id'], { - 'topology': { - 'data': [{ - 'type': f'DRAID{n_parity}', - 'disks': [disk['name'] for disk in unused_disk_for_update[:children]], - 'draid_data_disks': n_data, - 'draid_spare_disks': n_spare - }] - }, - 'allow_duplicate_serials': True, - }, job=True) - assert len(draid_pool_updated['topology']['data']) == 2 - assert draid_pool_updated['topology']['data'][1]['name'] == f'draid{n_parity}:{n_data}d:{children}c' \ - f':{n_spare}s-1' - - -@pytest.mark.parametrize( - 'n_data,n_spare,n_parity,minimum_disk', [ - (0, 0, 1, 2), - (0, 2, 1, 2), - (0, 0, 2, 3), - (0, 0, 3, 4), - (0, 2, 1, 2), - (0, 2, 2, 3), - ] -) -def test_invalid_draid_pool_creation(n_data, n_spare, n_parity, minimum_disk): - unused_disks = call('disk.get_unused') - if len(unused_disks) < 3: - pytest.skip('Insufficient number of disks to perform these tests') - - children = n_data + n_parity + n_spare - - with pytest.raises(ValidationErrors) as ve: - call('pool.create', { - 'name': POOL_NAME, - 'topology': { - 'data': [{ - 'disks': [disk['name'] for disk in unused_disks[:children]], - 'type': f'DRAID{n_parity}', - 'draid_data_disks': n_data, - 'draid_spare_disks': n_spare, - }], - }, - 'allow_duplicate_serials': True, - }, job=True) - - if n_spare: - assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.type' - assert ve.value.errors[0].errmsg == f'Requested number of dRAID data disks per group {n_data}' \ - f' is too high, at most {children - n_spare - n_parity}' \ - f' disks are available for data' - else: - assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.disks' - assert ve.value.errors[0].errmsg == f'You need at least {minimum_disk} disk(s) for this vdev type.'
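The name assertions in test_valid_draid_pool_creation above encode the dRAID vdev naming scheme these tests rely on: draid<parity>:<data>d:<children>c:<spares>s-<index>, where children = data + parity + spares. A minimal sketch of that expectation (the helper below is invented here for illustration and is not part of the test suite or the middleware):

    def expected_draid_name(n_data: int, n_spare: int, n_parity: int, index: int = 0) -> str:
        # Mirrors the f-string asserted in the tests above.
        children = n_data + n_parity + n_spare
        return f'draid{n_parity}:{n_data}d:{children}c:{n_spare}s-{index}'

    # e.g. the (2, 2, 2) parametrized case:
    assert expected_draid_name(2, 2, 2) == 'draid2:2d:6c:2s-0'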
diff --git a/tests/api2/test_draid_record_and_block_size.py b/tests/api2/test_draid_record_and_block_size.py deleted file mode 100644 index b2dc129d79e49..0000000000000 --- a/tests/api2/test_draid_record_and_block_size.py +++ /dev/null @@ -1,190 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.pool import another_pool -from middlewared.test.integration.utils import call - -from auto_config import ha - - -@pytest.fixture(scope='module') -def check_unused_disks(): - if len(call('disk.get_unused')) < 4: - pytest.skip('Insufficient number of disks to perform these tests') - - -@pytest.fixture(scope='module') -def draid_pool(): - unused_disks = call('disk.get_unused') - with another_pool({ - 'name': 'test_draid_pool', - 'topology': { - 'data': [{ - 'disks': [disk['name'] for disk in unused_disks[:2]], - 'type': 'DRAID1', - 'draid_data_disks': 1 - }], - }, - 'allow_duplicate_serials': True, - }) as pool_name: - yield pool_name - - -@pytest.fixture(scope='module') -def mirror_pool(): - unused_disks = call('disk.get_unused') - with another_pool({ - 'name': 'test_mirror_pool', - 'topology': { - 'data': [{ - 'disks': [disk['name'] for disk in unused_disks[:2]], - 'type': 'MIRROR', - }], - }, - 'allow_duplicate_serials': True, - }) as pool_name: - yield pool_name - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'record_size', ['1M'] -) -def test_draid_pool_default_record_size(draid_pool, record_size): - assert call('pool.dataset.get_instance', draid_pool['name'])['recordsize']['value'] == record_size - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'record_size', ['128K'] -) -def test_non_draid_pool_default_record_size(mirror_pool, record_size): - assert call('pool.dataset.get_instance', mirror_pool['name'])['recordsize']['value'] == record_size - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'update_recordsize, validation_error', [ - ('512K', False), - ('256K', False), - ('128K', False), - ('2M', False), - ('512', True), - ('4K', True), - ('64K', True), - ] -) -def test_draid_root_dataset_valid_recordsize(draid_pool, update_recordsize, validation_error): - if not validation_error: - assert call( - 'pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize} - )['recordsize']['value'] == update_recordsize - else: - with pytest.raises(ValidationErrors) as ve: - call('pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize}) - - assert ve.value.errors[0].attribute == 'pool_dataset_update.recordsize' - assert ve.value.errors[0].errmsg == f"'{update_recordsize}' is an invalid recordsize." 
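The fixtures and parametrize tables above pin down the recordsize behaviour these tests expect: dRAID pools default to a 1M recordsize, mirror pools to 128K, and updating a dRAID root dataset below 128K fails validation. A short sketch of that expectation (the names and the 128K cut-off are inferred from the test tables in this file alone, not from the middleware implementation):

    DRAID_DEFAULT_RECORDSIZE = '1M'     # asserted for the dRAID pool above
    MIRROR_DEFAULT_RECORDSIZE = '128K'  # asserted for the mirror pool above

    def recordsize_accepted_on_draid(recordsize: str) -> bool:
        # Hypothetical helper mirroring the valid/invalid split above:
        # '512K', '256K', '128K' and '2M' pass; '512', '4K', '64K' are rejected.
        return recordsize in {'128K', '256K', '512K', '2M'}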
- - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'update_recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K'] -) -def test_non_draid_root_dataset_valid_recordsize(mirror_pool, update_recordsize): - assert call( - 'pool.dataset.update', mirror_pool['name'], {'recordsize': update_recordsize} - )['recordsize']['value'] == update_recordsize - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'recordsize, validation_error', [ - ('512K', False), - ('256K', False), - ('128K', False), - ('2M', False), - ('512', True), - ('4K', True), - ('64K', True), - ] -) -def test_draid_dataset_valid_recordsize(draid_pool, recordsize, validation_error): - if not validation_error: - assert call( - 'pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset_{recordsize}', 'recordsize': recordsize} - )['recordsize']['value'] == recordsize - else: - with pytest.raises(ValidationErrors) as ve: - call('pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset_{recordsize}', - 'recordsize': recordsize}) - - assert ve.value.errors[0].attribute == 'pool_dataset_create.recordsize' - assert ve.value.errors[0].errmsg == f"'{recordsize}' is an invalid recordsize." - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K'] -) -def test_non_draid_dataset_valid_recordsize(mirror_pool, recordsize): - assert call( - 'pool.dataset.create', {'name': f'{mirror_pool["name"]}/test_dataset_{recordsize}', 'recordsize': recordsize} - )['recordsize']['value'] == recordsize - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'blocksize,validation_error', [ - ('16K', True), - ('32K', False), - ] -) -def test_draid_zvol_valid_blocksize(draid_pool, blocksize, validation_error): - if not validation_error: - assert call( - 'pool.dataset.create', { - 'name': f'{draid_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224, - 'volblocksize': blocksize, 'type': 'VOLUME', - } - )['volblocksize']['value'] == blocksize - else: - with pytest.raises(ValidationErrors) as ve: - call( - 'pool.dataset.create', { - 'name': f'{draid_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224, - 'volblocksize': blocksize, 'type': 'VOLUME' - } - ) - - assert ve.value.errors[0].attribute == 'pool_dataset_create.volblocksize' - assert ve.value.errors[0].errmsg == 'Volume block size must be greater than or equal to 32K for dRAID pools' - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'blocksize', ['16K', '32K'] -) -def test_non_draid_zvol_valid_blocksize(mirror_pool, blocksize): - assert call( - 'pool.dataset.create', { - 'name': f'{mirror_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224, - 'volblocksize': blocksize, 'type': 'VOLUME', - } - )['volblocksize']['value'] == blocksize - - -@pytest.mark.usefixtures('check_unused_disks') -@pytest.mark.parametrize( - 'update_recordsize, default_record_size', [ - ('512K', '1M'), - ] -) -def test_draid_dataset_default_recordsize(draid_pool, update_recordsize, default_record_size): - assert call( - 'pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize} - )['recordsize']['value'] == update_recordsize - - assert call( - 'pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset'} - )['recordsize']['value'] == default_record_size diff --git a/tests/api2/test_enable_disable_services.py b/tests/api2/test_enable_disable_services.py deleted file 
mode 100644 index bd20cdee12993..0000000000000 --- a/tests/api2/test_enable_disable_services.py +++ /dev/null @@ -1,11 +0,0 @@ -from middlewared.test.integration.utils import call - - -def test_01_enable_services(): - for svc in filter(lambda x: not x['enable'], call('service.query')): - call('service.update', svc['id'], {'enable': True}) - - -def test_02_disable_services(): - for svc in filter(lambda x: x['enable'], call('service.query')): - call('service.update', svc['id'], {'enable': False}) diff --git a/tests/api2/test_encrypted_dataset_services_restart.py b/tests/api2/test_encrypted_dataset_services_restart.py deleted file mode 100644 index 760de61f2c214..0000000000000 --- a/tests/api2/test_encrypted_dataset_services_restart.py +++ /dev/null @@ -1,73 +0,0 @@ -import contextlib - -import pytest -from pytest_dependency import depends -from middlewared.test.integration.utils import call -from middlewared.test.integration.assets.pool import dataset - -import os -import sys -sys.path.append(os.getcwd()) - - -PASSPHRASE = 'testing123' - - -@contextlib.contextmanager -def enable_auto_start(service_name): - service = call('service.query', [['service', '=', service_name]], {'get': True}) - try: - yield call('service.update', service['id'], {'enable': True}) - finally: - call('service.update', service['id'], {'enable': False}) - - -@contextlib.contextmanager -def start_service(service_name): - try: - yield call('service.start', service_name) - finally: - call('service.stop', service_name) - - -@contextlib.contextmanager -def lock_dataset(dataset_name): - try: - yield call('pool.dataset.lock', dataset_name, {'force_umount': True}, job=True) - finally: - call( - 'pool.dataset.unlock', dataset_name, { - 'datasets': [{'passphrase': PASSPHRASE, 'name': dataset_name}] - }, - job=True, - ) - - -def test_service_restart_on_unlock_dataset(request): - service_name = 'smb' - registered_name = 'cifs' - with dataset('testsvcunlock', data={ - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-256-GCM', - 'pbkdf2iters': 350000, - 'passphrase': PASSPHRASE, - }, - 'inherit_encryption': False - }) as ds: - path = f'/mnt/{ds}' - share = call(f'sharing.{service_name}.create', {'path': path, 'name': 'smb-dataset'}) - assert share['locked'] is False - - with start_service(registered_name) as service_started: - assert service_started is True - - call('service.stop', registered_name) - assert call('service.started', registered_name) is False - with enable_auto_start(registered_name): - with lock_dataset(ds): - assert call(f'sharing.{service_name}.get_instance', share['id'])['locked'] is True - assert call('service.started', registered_name) is False - - assert call(f'sharing.{service_name}.get_instance', share['id'])['locked'] is False - assert call('service.started', registered_name) is True diff --git a/tests/api2/test_events.py b/tests/api2/test_events.py deleted file mode 100644 index 3f6a0e586077e..0000000000000 --- a/tests/api2/test_events.py +++ /dev/null @@ -1,14 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import client - - -def test_can_subscribe_to_failover_status_event_without_authorization(): - with client(auth=None) as c: - c.subscribe("failover.status", lambda *args, **kwargs: None) - - -def test_can_not_subscribe_to_an_event_without_authorization(): - with client(auth=None) as c: - with pytest.raises(ValueError): - c.subscribe("core.get_jobs", lambda *args, **kwargs: None) diff --git a/tests/api2/test_filesystem__file_tail_follow.py 
b/tests/api2/test_filesystem__file_tail_follow.py deleted file mode 100644 index a4508465d4402..0000000000000 --- a/tests/api2/test_filesystem__file_tail_follow.py +++ /dev/null @@ -1,42 +0,0 @@ -import time - -import pytest - -from middlewared.test.integration.utils import client, ssh - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -def test_filesystem__file_tail_follow__grouping(): - ssh("echo > /tmp/file_tail_follow.txt") - - with client() as c: - received = [] - - def append(type, **kwargs): - received.append((time.monotonic(), kwargs["fields"]["data"])) - - c.subscribe("filesystem.file_tail_follow:/tmp/file_tail_follow.txt", append) - - ssh("for i in `seq 1 200`; do echo test >> /tmp/file_tail_follow.txt; sleep 0.01; done") - - # Settle down things - time.sleep(1) - - received = received[1:] # Initial file contents - # We were sending this for 2-3 seconds, so we should have received 4-6 blocks with 0.5 sec interval - assert 4 <= len(received) <= 6, str(received) - # All blocks should have been received uniformly in time - assert all(0.4 <= b2[0] - b1[0] <= 1.0 for b1, b2 in zip(received[:-1], received[1:])), str(received) - # All blocks should contain more or less same amount of data - assert all(len(block[1].split("\n")) <= 60 for block in received[:-1]), str(received) - - # One single send - ssh("echo finish >> /tmp/file_tail_follow.txt") - - time.sleep(1) - assert received[-1][1] == "finish\n" diff --git a/tests/api2/test_filesystem__put.py b/tests/api2/test_filesystem__put.py deleted file mode 100644 index 5be034b9c7f01..0000000000000 --- a/tests/api2/test_filesystem__put.py +++ /dev/null @@ -1,59 +0,0 @@ -import json -import os -import sys -import tempfile - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import wait_on_job, POST -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - - -def upload_file(file_path, file_path_on_tn): - data = {'method': 'filesystem.put', 'params': [file_path_on_tn]} - with open(file_path, 'rb') as f: - response = POST( - '/_upload/', - files={'data': json.dumps(data), 'file': f}, - use_ip_only=True, - force_new_headers=True, - ) - - job_id = json.loads(response.text)['job_id'] - return wait_on_job(job_id, 300) - - -def file_exists(file_path): - return any( - entry for entry in call('filesystem.listdir', os.path.dirname(file_path)) - if entry['name'] == os.path.basename(file_path) and entry['type'] == 'FILE' - ) - - -def test_put_file(): - upload_file_impl(False) - - -def test_put_file_in_locked_dataset(): - upload_file_impl(True) - - -def upload_file_impl(lock): - with tempfile.NamedTemporaryFile(mode='w') as f: - f.write('filesystem.put test') - f.flush() - - with dataset( - 'test_filesystem_put', data={ - 'encryption': True, - 'inherit_encryption': False, - 'encryption_options': {'passphrase': '12345678'} - }, - ) as test_dataset: - if lock: - call('pool.dataset.lock', test_dataset, job=True) - file_path_on_tn = f'/mnt/{test_dataset}/testfile' - job_detail = upload_file(f.name,file_path_on_tn) - assert job_detail['results']['state'] == ('FAILED' if lock else 'SUCCESS') - assert file_exists(file_path_on_tn) is not lock diff --git a/tests/api2/test_group_utils.py b/tests/api2/test_group_utils.py deleted file mode 100644 index 4c52902d3b72c..0000000000000 --- a/tests/api2/test_group_utils.py +++ /dev/null @@ -1,17 +0,0 @@ -from middlewared.test.integration.utils import call -from 
middlewared.test.integration.assets.account import group, user - - -def test_root_password_disabled(): - with group({"name": "group1"}) as g1: - with group({"name": "group2"}) as g2: - with user({ - "username": "test", - "full_name": "Test", - "group_create": True, - "groups": [g1["id"], g2["id"]], - "password": "test1234", - }) as u: - result = call("group.get_password_enabled_users", [g1["gid"], g2["gid"]], []) - assert len(result) == 1 - assert result[0]["id"] == u["id"] diff --git a/tests/api2/test_initshutdownscript.py b/tests/api2/test_initshutdownscript.py deleted file mode 100644 index 91cf4af44c259..0000000000000 --- a/tests/api2/test_initshutdownscript.py +++ /dev/null @@ -1,118 +0,0 @@ -import base64 -import contextlib -import errno -import stat -import time - -import pytest - -from middlewared.test.integration.utils import client, ssh -from middlewared.service_exception import ValidationErrors, ValidationError - -TEST_SCRIPT_FILE = '/root/.TEST_SCRIPT_FILE' -_775 = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH - - -@pytest.fixture(scope='module') -def ws_client(): - with client() as c: - yield c - - -@contextlib.contextmanager -def initshutudown_script(ws_client, contents, extra=None): - extra = extra or {} - - ws_client.call( - 'filesystem.file_receive', - TEST_SCRIPT_FILE, - base64.b64encode(contents.encode('utf-8')).decode(), - {'mode': _775}, - ) - script = ws_client.call( - 'initshutdownscript.create', - { - 'type': 'SCRIPT', - 'script': TEST_SCRIPT_FILE, - 'when': 'PREINIT', - **extra, - } - ) - try: - yield script - finally: - ws_client.call('initshutdownscript.delete', script['id']) - - -def test_initshutudown_script(ws_client): - with initshutudown_script(ws_client, 'echo "testing"') as script: - _id = script['id'] - filters = [['id', '=', _id]] - opts = {'get': True} - - # verify - assert ws_client.call('initshutdownscript.query', filters, opts)['script'] == TEST_SCRIPT_FILE - - # add a comment - ws_client.call('initshutdownscript.update', _id, {'comment': 'test_comment'}) - assert ws_client.call('initshutdownscript.query', filters, opts)['comment'] == 'test_comment' - - # disable it - ws_client.call('initshutdownscript.update', _id, {'enabled': False}) - assert ws_client.call('initshutdownscript.query', filters, opts)['enabled'] is False - - assert not ws_client.call('initshutdownscript.query', filters) - - -def test_initshutdown_script_bad(ws_client): - bad_script = f'/root/nonexistent-script' - with pytest.raises(ValidationErrors) as e: - ws_client.call( - 'initshutdownscript.create', - { - 'type': 'SCRIPT', - 'script': bad_script, - 'when': 'PREINIT', - } - ) - - assert e.value.errors == [ - ValidationError( - 'init_shutdown_script_create.script', - f'Path {bad_script} not found', - errno.ENOENT - ) - ] - - -def test_initshutdownscript_success(ws_client): - ssh("rm /tmp/flag", check=False) - - with initshutudown_script(ws_client, 'echo ok > /tmp/flag'): - ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True) - - assert ssh("cat /tmp/flag") == "ok\n" - - -def test_initshutdownscript_timeout(ws_client): - ssh("rm /tmp/flag", check=False) - - with initshutudown_script(ws_client, 'sleep 10', {"timeout": 2}): - start = time.monotonic() - ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True) - - assert time.monotonic() - start < 5 - - assert f"Timed out running SCRIPT: {TEST_SCRIPT_FILE!r}" in ssh("cat /var/log/middlewared.log") - - -def test_initshutdownscript_failure(ws_client): - ssh("rm /tmp/flag", 
check=False) - - with initshutudown_script(ws_client, 'echo everything went wrong > /dev/stderr; exit 1'): - ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True) - - assert ( - f"Failed to execute 'exec {TEST_SCRIPT_FILE}' with error 'everything went wrong\\n'" in - ssh("cat /var/log/middlewared.log") - ) diff --git a/tests/api2/test_ipa_join.py b/tests/api2/test_ipa_join.py deleted file mode 100644 index 44026cf75eeb6..0000000000000 --- a/tests/api2/test_ipa_join.py +++ /dev/null @@ -1,116 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.directory_service import ipa, FREEIPA_ADMIN_BINDPW -from middlewared.test.integration.assets.product import product_type -from middlewared.test.integration.utils import call, client -from middlewared.test.integration.utils.client import truenas_server - - -@pytest.fixture(scope="module") -def do_freeipa_connection(): - with ipa() as config: - yield config - - -@pytest.fixture(scope="function") -def override_product(): - if truenas_server.server_type == 'ENTERPRISE_HA': - yield - else: - with product_type(): - yield - - -@pytest.fixture(scope="function") -def enable_ds_auth(override_product): - sys_config = call('system.general.update', {'ds_auth': True}) - try: - yield sys_config - finally: - call('system.general.update', {'ds_auth': False}) - - -def test_setup_and_enabling_freeipa(do_freeipa_connection): - config = do_freeipa_connection - - ds = call('directoryservices.status') - assert ds['type'] == 'IPA' - assert ds['status'] == 'HEALTHY' - - alerts = [alert['klass'] for alert in call('alert.list')] - - # There's a one-shot alert that gets fired if we are an IPA domain - # connected via legacy mechanism. - assert 'IPALegacyConfiguration' not in alerts - - assert config['kerberos_realm'], str(config) - assert config['kerberos_principal'], str(config) - - # our kerberos principal should be the host one (not SMB or NFS) - assert config['kerberos_principal'].startswith('host/') - - -def test_accounts_cache(do_freeipa_connection): - ipa_users_cnt = call('user.query', [['local', '=', False]], {'count': True}) - assert ipa_users_cnt != 0 - - ipa_groups_cnt = call('group.query', [['local', '=', False]], {'count': True}) - assert ipa_groups_cnt != 0 - - -@pytest.mark.parametrize('keytab_name', [ - 'IPA_MACHINE_ACCOUNT', - 'IPA_NFS_KEYTAB', - 'IPA_SMB_KEYTAB' -]) -def test_keytabs_exist(do_freeipa_connection, keytab_name): - call('kerberos.keytab.query', [['name', '=', keytab_name]], {'get': True}) - - -def test_check_kerberos_ticket(do_freeipa_connection): - tkt = call('kerberos.check_ticket') - - assert tkt['name_type'] == 'KERBEROS_PRINCIPAL' - assert tkt['name'].startswith(do_freeipa_connection['kerberos_principal']) - - -def test_certificate(do_freeipa_connection): - call('certificateauthority.query', [['name', '=', 'IPA_DOMAIN_CACERT']], {'get': True}) - - -def test_system_keytab_has_nfs_principal(do_freeipa_connection): - assert call('kerberos.keytab.has_nfs_principal') - - -def test_smb_keytab_exists(do_freeipa_connection): - call('filesystem.stat', '/etc/ipa/smb.keytab') - - -def test_admin_privilege(do_freeipa_connection, enable_ds_auth): - ipa_config = call('ldap.ipa_config') - - priv_names = [priv['name'] for priv in call('privilege.query')] - assert ipa_config['domain'].upper() in priv_names - - priv = call('privilege.query', [['name', '=', ipa_config['domain'].upper()]], {'get': True}) - admins_grp = call('group.get_group_obj', {'groupname': 'admins', 'sid_info': True}) - - assert len(priv['ds_groups']) == 
1 - assert priv['ds_groups'][0]['gid'] == admins_grp['gr_gid'] - assert priv['ds_groups'][0]['sid'] == admins_grp['sid'] - - assert priv['roles'] == ['FULL_ADMIN'] - - with client(auth=('ipaadmin', FREEIPA_ADMIN_BINDPW)) as c: - me = c.call('auth.me') - - assert 'DIRECTORY_SERVICE' in me['account_attributes'] - assert 'LDAP' in me['account_attributes'] - assert me['privilege']['roles'] == set(priv['roles']) - - -def test_dns_resolution(do_freeipa_connection): - ipa_config = do_freeipa_connection['ipa_config'] - - addresses = call('dnsclient.forward_lookup', {'names': [ipa_config['host']]}) - assert len(addresses) != 0 diff --git a/tests/api2/test_ipa_leave.py b/tests/api2/test_ipa_leave.py deleted file mode 100644 index b90fe7bd2de9e..0000000000000 --- a/tests/api2/test_ipa_leave.py +++ /dev/null @@ -1,74 +0,0 @@ -import errno -import pytest - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.directory_service import ipa -from middlewared.test.integration.utils import call - - -@pytest.fixture(scope="module") -def ipa_config(): - """ join then leave IPA domain so that we can evaluate server after leaving the IPA domain """ - with ipa() as config: - ipa_config = config['ipa_config'] - - yield ipa_config - - -def test_cache_cleared(ipa_config): - ipa_users_cnt = call('user.query', [['local', '=', False]], {'count': True}) - assert ipa_users_cnt == 0 - - ipa_groups_cnt = call('group.query', [['local', '=', False]], {'count': True}) - assert ipa_groups_cnt == 0 - - -@pytest.mark.parametrize('keytab_name', [ - 'IPA_MACHINE_ACCOUNT', - 'IPA_NFS_KEYTAB', - 'IPA_SMB_KEYTAB' -]) -def test_keytabs_deleted(ipa_config, keytab_name): - kt = call('kerberos.keytab.query', [['name', '=', keytab_name]]) - assert len(kt) == 0 - - -def test_check_no_kerberos_ticket(ipa_config): - with pytest.raises(CallError) as ce: - call('kerberos.check_ticket') - - assert ce.value.errno == errno.ENOKEY - - -def test_check_no_kerberos_realm(ipa_config): - realms = call('kerberos.realm.query') - assert len(realms) == 0, str(realms) - - -def test_system_keytab_has_no_nfs_principal(ipa_config): - assert not call('kerberos.keytab.has_nfs_principal') - - -def test_smb_keytab_does_not_exist(ipa_config): - with pytest.raises(CallError) as ce: - call('filesystem.stat', '/etc/ipa/smb.keytab') - - assert ce.value.errno == errno.ENOENT - - -def test_no_admin_privilege(ipa_config): - priv = call('privilege.query', [['name', '=', ipa_config['domain'].upper()]]) - assert priv == [] - - -def test_no_certificate(ipa_config): - certs = call('certificateauthority.query', [['name', '=', 'IPA_DOMAIN_CACERT']]) - assert len(certs) == 0, str(certs) - - -def test_no_dns_resolution(ipa_config): - try: - results = call('dnsclient.forward_lookup', {'names': [ipa_config['host']]}) - assert len(results) == 0 - except Exception: - pass diff --git a/tests/api2/test_iscsi.py b/tests/api2/test_iscsi.py deleted file mode 100644 index d4e9b7f9faabc..0000000000000 --- a/tests/api2/test_iscsi.py +++ /dev/null @@ -1,70 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.iscsi import iscsi_extent -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - - -def test__iscsi_extent__disk_choices(request): - with dataset("test zvol", {"type": "VOLUME", "volsize": 1048576}) as ds: - # Make snapshots available for devices - call("zfs.dataset.update", ds, {"properties": {"snapdev": 
{"parsed": "visible"}}}) - call("zfs.snapshot.create", {"dataset": ds, "name": "snap-1"}) - assert call("iscsi.extent.disk_choices") == { - f'zvol/{ds.replace(" ", "+")}': f'{ds} (1 MiB)', - f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]', - } - - # Create new extent - with iscsi_extent({ - "name": "test_extent", - "type": "DISK", - "disk": f"zvol/{ds.replace(' ', '+')}", - }): - # Verify that zvol is not available in iscsi disk choices - assert call("iscsi.extent.disk_choices") == { - f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]', - } - # Verify that zvol is not availabe in VM disk choices - # (and snapshot zvol is not available too as it is read-only) - assert call("vm.device.disk_choices") == {} - - -def test__iscsi_extent__create_with_invalid_disk_with_whitespace(request): - with dataset("test zvol", { - "type": "VOLUME", - "volsize": 1048576, - }) as ds: - with pytest.raises(ValidationErrors) as e: - with iscsi_extent({ - "name": "test_extent", - "type": "DISK", - "disk": f"zvol/{ds}", - }): - pass - - assert str(e.value) == ( - f"[EINVAL] iscsi_extent_create.disk: Device '/dev/zvol/{ds}' for volume '{ds}' does not exist\n" - ) - - -def test__iscsi_extent__locked(request): - with dataset("test zvol", { - "type": "VOLUME", - "volsize": 1048576, - "inherit_encryption": False, - "encryption": True, - "encryption_options": {"passphrase": "testtest"}, - }) as ds: - with iscsi_extent({ - "name": "test_extent", - "type": "DISK", - "disk": f"zvol/{ds.replace(' ', '+')}", - }) as extent: - assert not extent["locked"] - - call("pool.dataset.lock", ds, job=True) - - extent = call("iscsi.extent.get_instance", extent["id"]) - assert extent["locked"] diff --git a/tests/api2/test_iscsi_auth_crud_roles.py b/tests/api2/test_iscsi_auth_crud_roles.py deleted file mode 100644 index ed29e4f869132..0000000000000 --- a/tests/api2/test_iscsi_auth_crud_roles.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_AUTH_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.auth.query", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_AUTH_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.auth.create", role, False) - common_checks(unprivileged_user_fixture, "iscsi.auth.update", role, False) - common_checks(unprivileged_user_fixture, "iscsi.auth.delete", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_AUTH_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.auth.create", role, True) - common_checks(unprivileged_user_fixture, "iscsi.auth.update", role, True) - common_checks(unprivileged_user_fixture, "iscsi.auth.delete", role, True) diff --git a/tests/api2/test_iscsi_auth_network.py b/tests/api2/test_iscsi_auth_network.py deleted file mode 100644 index 79df689d0a207..0000000000000 --- a/tests/api2/test_iscsi_auth_network.py +++ /dev/null @@ -1,190 +0,0 @@ -import contextlib -import ipaddress -import socket - -import pytest - -from middlewared.test.integration.assets.iscsi import target_login_test -from middlewared.test.integration.assets.pool import dataset -from 
middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.client import truenas_server - - -@pytest.fixture(scope="module") -def my_ip4(): - """See which of my IP addresses will be used to connect.""" - # Things can be complicated e.g. my NAT between the test runner - # and the target system Therefore, first try using ssh into the - # remote system and see what it thinks our IP address is. - try: - myip = ipaddress.ip_address(ssh('echo $SSH_CLIENT').split()[0]) - if myip.version != 4: - raise ValueError("Not a valid IPv4 address") - return str(myip) - except Exception: - # Fall back - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(2) - result = sock.connect_ex((truenas_server.ip, 80)) - assert result == 0 - myip = sock.getsockname()[0] - sock.close() - # Check that we have an IPv4 address - socket.inet_pton(socket.AF_INET, myip) - return myip - - -@contextlib.contextmanager -def portal(): - portal_config = call('iscsi.portal.create', {'listen': [{'ip': truenas_server.ip}], 'discovery_authmethod': 'NONE'}) - try: - yield portal_config - finally: - call('iscsi.portal.delete', portal_config['id']) - - -@contextlib.contextmanager -def initiator(): - initiator_config = call('iscsi.initiator.create', {}) - try: - yield initiator_config - finally: - # Very likely that already cleaned up (by removing only target using it) - if call('iscsi.initiator.query', [['id', '=', initiator_config['id']]]): - call('iscsi.initiator.delete', initiator_config['id']) - - -@contextlib.contextmanager -def target(target_name, groups): - target_config = call('iscsi.target.create', {'name': target_name, 'groups': groups}) - try: - yield target_config - finally: - call('iscsi.target.delete', target_config['id']) - - -@contextlib.contextmanager -def extent(extent_name, zvol_name=None): - zvol_name = zvol_name or extent_name - with dataset(zvol_name, {'type': 'VOLUME', 'volsize': 51200, 'volblocksize': '512', 'sparse': True}) as zvol: - extent_config = call('iscsi.extent.create', {'name': extent_name, 'disk': f'zvol/{zvol}'}) - try: - yield extent_config - finally: - call('iscsi.extent.delete', extent_config['id']) - - -@contextlib.contextmanager -def target_extent(target_id, extent_id, lun_id): - target_extent_config = call( - 'iscsi.targetextent.create', {'target': target_id, 'extent': extent_id, 'lunid': lun_id} - ) - try: - yield target_extent_config - finally: - call('iscsi.targetextent.delete', target_extent_config['id']) - - -@contextlib.contextmanager -def configured_target_to_extent(): - with portal() as portal_config: - with initiator() as initiator_config: - with target( - 'test-target', groups=[{ - 'portal': portal_config['id'], - 'initiator': initiator_config['id'], - 'auth': None, - 'authmethod': 'NONE' - }] - ) as target_config: - with extent('test_extent') as extent_config: - with target_extent(target_config['id'], extent_config['id'], 1): - yield { - 'extent': extent_config, - 'target': target_config, - 'global': call('iscsi.global.config'), - 'portal': portal_config, - } - - -@contextlib.contextmanager -def configure_iscsi_service(): - with configured_target_to_extent() as iscsi_config: - try: - call('service.start', 'iscsitarget') - assert call('service.started', 'iscsitarget') is True - yield iscsi_config - finally: - call('service.stop', 'iscsitarget') - - -@pytest.mark.parametrize('valid', [True, False]) -def test_iscsi_auth_networks(valid): - with configure_iscsi_service() as config: - call( - 'iscsi.target.update', - 
config['target']['id'], - {'auth_networks': [] if valid else ['8.8.8.8/32']} - ) - portal_listen_details = config['portal']['listen'][0] - assert target_login_test( - f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}', - f'{config["global"]["basename"]}:{config["target"]["name"]}', - ) is valid - - -@pytest.mark.parametrize('valid', [True, False]) -def test_iscsi_auth_networks_exact_ip(my_ip4, valid): - with configure_iscsi_service() as config: - call( - 'iscsi.target.update', - config['target']['id'], - {'auth_networks': [f"{my_ip4}/32"] if valid else ['8.8.8.8/32']} - ) - portal_listen_details = config['portal']['listen'][0] - assert target_login_test( - f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}', - f'{config["global"]["basename"]}:{config["target"]["name"]}', - ) is valid - - -@pytest.mark.parametrize('valid', [True, False]) -def test_iscsi_auth_networks_netmask_24(my_ip4, valid): - # good_ip will be our IP with the last byte cleared. - good_ip = '.'.join(my_ip4.split('.')[:-1] + ['0']) - # bad_ip will be our IP with the second last byte changed and last byte cleared - n = (int(my_ip4.split('.')[2]) + 1) % 256 - bad_ip = '.'.join(good_ip.split('.')[:2] + [str(n), '0']) - with configure_iscsi_service() as config: - call( - 'iscsi.target.update', - config['target']['id'], - {'auth_networks': ["8.8.8.8/24", f"{good_ip}/24"] if valid else ["8.8.8.8/24", f"{bad_ip}/24"]} - ) - portal_listen_details = config['portal']['listen'][0] - assert target_login_test( - f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}', - f'{config["global"]["basename"]}:{config["target"]["name"]}', - ) is valid - - -@pytest.mark.parametrize('valid', [True, False]) -def test_iscsi_auth_networks_netmask_16(my_ip4, valid): - # good_ip will be our IP with the second last byte changed and last byte cleared - n = (int(my_ip4.split('.')[2]) + 1) % 256 - good_ip = '.'.join(my_ip4.split('.')[:2] + [str(n), '0']) - # bad_ip will be the good_ip with the second byte changed - ip_list = good_ip.split('.') - n = (int(ip_list[1]) + 1) % 256 - bad_ip = '.'.join([ip_list[0], str(n)] + ip_list[-2:]) - with configure_iscsi_service() as config: - call( - 'iscsi.target.update', - config['target']['id'], - {'auth_networks': ["8.8.8.8/16", f"{good_ip}/16"] if valid else ["8.8.8.8/16", f"{bad_ip}/16"]} - ) - portal_listen_details = config['portal']['listen'][0] - assert target_login_test( - f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}', - f'{config["global"]["basename"]}:{config["target"]["name"]}', - ) is valid diff --git a/tests/api2/test_iscsi_extent_crud_roles.py b/tests/api2/test_iscsi_extent_crud_roles.py deleted file mode 100644 index eae622008b59c..0000000000000 --- a/tests/api2/test_iscsi_extent_crud_roles.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.extent.query", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, False) - common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, False) - common_checks(unprivileged_user_fixture, 
"iscsi.extent.delete", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_EXTENT_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, True) - common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, True) - common_checks(unprivileged_user_fixture, "iscsi.extent.delete", role, True) diff --git a/tests/api2/test_iscsi_global_crud_roles.py b/tests/api2/test_iscsi_global_crud_roles.py deleted file mode 100644 index 971ead1d14380..0000000000000 --- a/tests/api2/test_iscsi_global_crud_roles.py +++ /dev/null @@ -1,21 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_GLOBAL_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.global.config", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "iscsi.global.sessions", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "iscsi.global.client_count", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "iscsi.global.alua_enabled", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_GLOBAL_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.global.update", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_GLOBAL_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.global.update", role, True) diff --git a/tests/api2/test_iscsi_host_crud_roles.py b/tests/api2/test_iscsi_host_crud_roles.py deleted file mode 100644 index 8f8615df05cf6..0000000000000 --- a/tests/api2/test_iscsi_host_crud_roles.py +++ /dev/null @@ -1,26 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.host.query", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "iscsi.host.get_initiators", role, True) - common_checks(unprivileged_user_fixture, "iscsi.host.get_targets", role, True) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.host.create", role, False) - common_checks(unprivileged_user_fixture, "iscsi.host.update", role, False) - common_checks(unprivileged_user_fixture, "iscsi.host.delete", role, False) - common_checks(unprivileged_user_fixture, "iscsi.host.set_initiators", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_HOST_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.host.create", role, True) - common_checks(unprivileged_user_fixture, "iscsi.host.update", role, True) - common_checks(unprivileged_user_fixture, "iscsi.host.delete", role, True) - 
common_checks(unprivileged_user_fixture, "iscsi.host.set_initiators", role, True) diff --git a/tests/api2/test_iscsi_initiator_crud_roles.py b/tests/api2/test_iscsi_initiator_crud_roles.py deleted file mode 100644 index f12ae6d9c8d70..0000000000000 --- a/tests/api2/test_iscsi_initiator_crud_roles.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.initiator.query", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, False) - common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, False) - common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_INITIATOR_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, True) - common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, True) - common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, True) diff --git a/tests/api2/test_iscsi_portal_crud_roles.py b/tests/api2/test_iscsi_portal_crud_roles.py deleted file mode 100644 index 882a483062dc5..0000000000000 --- a/tests/api2/test_iscsi_portal_crud_roles.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.portal.query", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, False) - common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, False) - common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_PORTAL_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, True) - common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, True) - common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, True) diff --git a/tests/api2/test_iscsi_target_crud_roles.py b/tests/api2/test_iscsi_target_crud_roles.py deleted file mode 100644 index 1991c2a3871e3..0000000000000 --- a/tests/api2/test_iscsi_target_crud_roles.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGET_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "iscsi.target.query", role, True, 
diff --git a/tests/api2/test_job_credentials.py b/tests/api2/test_job_credentials.py
deleted file mode 100644
index c95984ed91d60..0000000000000
--- a/tests/api2/test_job_credentials.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, mock
-
-
-def test_job_credentials():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-            job_id = c.call("test.test1")
-
-        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-
-        assert job["credentials"] == {"type": "LOGIN_PASSWORD", "data": {"username": c.username}}
diff --git a/tests/api2/test_job_errno.py b/tests/api2/test_job_errno.py
deleted file mode 100644
index 766dd9dc64e1d..0000000000000
--- a/tests/api2/test_job_errno.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, mock
-from truenas_api_client import ClientException
-
-
-def test_job_errno():
-
-    with mock("test.test1", """
-        from middlewared.service import job
-        from middlewared.schema import returns, Password
-        from middlewared.service_exception import CallError
-
-        @job()
-        @returns(Password("my_password"))
-        def mock(self, job, *args):
-            raise CallError("canary", 13)
-    """):
-        job_id = call("test.test1")
-
-        with pytest.raises(ClientException):
-            call("core.job_wait", job_id, job=True)
-
-        result = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-
-        assert "errno" in result["exc_info"]
-        assert result["exc_info"]["errno"] == 13
diff --git a/tests/api2/test_job_events.py b/tests/api2/test_job_events.py
deleted file mode 100644
index 726de846543aa..0000000000000
--- a/tests/api2/test_job_events.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pprint
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, client, mock
-
-
-def test_successful_job_events():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with client() as c:
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("core.get_jobs", callback, sync=True)
-            c.call("test.test1", job=True)
-
-            # FIXME: Sometimes an equal message for `SUCCESS` state is being sent (or received) twice, we were not able
-            # to understand why and this does not break anything so we are not willing to waste our time investigating
-            # this.
-            if len(events) == 4 and events[2] == events[3]:
-                events = events[:3]
-
-            assert len(events) == 3, pprint.pformat(events, indent=2)
-            assert events[0][0] == "ADDED"
-            assert events[0][1]["fields"]["state"] == "WAITING"
-            assert events[1][0] == "CHANGED"
-            assert events[1][1]["fields"]["state"] == "RUNNING"
-            assert events[2][0] == "CHANGED"
-            assert events[2][1]["fields"]["state"] == "SUCCESS"
-            assert events[2][1]["fields"]["result"] == 42
-
-
-def test_unprivileged_user_only_sees_its_own_jobs_events():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("core.get_jobs", callback, sync=True)
-
-            call("test.test1", "secret", job=True)
-            c.call("test.test1", "not secret", job=True)
-
-            assert all(event[1]["fields"]["arguments"] == ["not secret"]
-                       for event in events), pprint.pformat(events, indent=2)
diff --git a/tests/api2/test_job_lock.py b/tests/api2/test_job_lock.py
deleted file mode 100644
index c09223c2f8e68..0000000000000
--- a/tests/api2/test_job_lock.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import contextlib
-import os
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import call, mock, ssh
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_jobs_execute_in_parallel():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1")
-        j2 = call("test.test1")
-        j3 = call("test.test1")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert time.monotonic() - start < 6
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_jobs_execute_sequentially_when_there_is_a_lock():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job(lock="test")
-        def mock(self, job, *args):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1")
-        j2 = call("test.test1")
-        j3 = call("test.test1")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert time.monotonic() - start >= 15
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_with_argument():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job(lock=lambda args: f"test.{args[0]}")
-        def mock(self, job, s):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1", "a")
-        j2 = call("test.test1", "b")
-        j3 = call("test.test1", "a")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert 10 <= time.monotonic() - start < 15
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_queue_size():
-    try:
-        with mock("test.test1", """
-            from middlewared.service import job
-
-            @job(lock="test", lock_queue_size=1)
-            def mock(self, job, *args):
-                with open("/tmp/test", "a") as f:
-                    f.write("a\\n")
-
-                import time
-                time.sleep(5)
-        """):
-            j1 = call("test.test1")
-            j2 = call("test.test1")
-            j3 = call("test.test1")
-            j4 = call("test.test1")
-
-            call("core.job_wait", j1, job=True)
-            call("core.job_wait", j2, job=True)
-            call("core.job_wait", j3, job=True)
-            call("core.job_wait", j4, job=True)
-
-            assert ssh("cat /tmp/test") == "a\na\n"
-
-            assert j3 == j2
-            assert j4 == j2
-    finally:
-        with contextlib.suppress(FileNotFoundError):
-            os.unlink("/tmp/test")
-
-
-def test_call_sync_a_job_with_lock():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        def mock(self):
-            return self.middleware.call_sync("test.test2").wait_sync()
-    """):
-        with mock("test.test2", """
-            from middlewared.service import job
-
-            @job(lock="test")
-            def mock(self, job, *args):
-                return 42
-        """):
-            assert call("test.test1") == 42
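The job tests deleted here all rely on the mock helper from middlewared.test.integration.utils: a context manager that temporarily installs the given code string as the implementation of a middleware method and restores the original on exit. The usage pattern, grounded in the call sites above:

from middlewared.test.integration.utils import call, mock


def example_mock_roundtrip():
    with mock("test.test1", """
        from middlewared.service import job

        @job()
        def mock(self, job, *args):
            return "mocked"
    """):
        job_id = call("test.test1")        # starts the mocked job, returns its id
        # core.job_wait blocks until the job finishes and yields its result
        assert call("core.job_wait", job_id, job=True) == "mocked"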
"""): - job_id = call("test.test1") - - result = call("core.job_wait", job_id, job=True) - # Waiting for result should give unredacted version - assert result == "canary" - - # Querying by default should redact - job = call("core.get_jobs", [["id", "=", job_id]], {"get": True}) - assert job["result"] != "canary" - - # but we should also be able to get unredacted result if needed - job = call("core.get_jobs", [["id", "=", job_id]], {"get": True, "extra": {"raw_result": True}}) - assert job["result"] == "canary" diff --git a/tests/api2/test_keychain_ssh.py b/tests/api2/test_keychain_ssh.py deleted file mode 100644 index d711b2630ded6..0000000000000 --- a/tests/api2/test_keychain_ssh.py +++ /dev/null @@ -1,91 +0,0 @@ -import pytest - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - - -@pytest.fixture(scope="module") -def credential(): - credential = call("keychaincredential.create", { - "name": "key", - "type": "SSH_KEY_PAIR", - "attributes": call("keychaincredential.generate_ssh_key_pair"), - }) - try: - yield credential - finally: - call("keychaincredential.delete", credential["id"]) - - -def test_remote_ssh_semiautomatic_setup_invalid_homedir(credential): - with user({ - "username": "admin", - "full_name": "admin", - "group_create": True, - "home_create": False, - "password": "test1234", - }): - token = call("auth.generate_token") - with pytest.raises(CallError) as ve: - call("keychaincredential.remote_ssh_semiautomatic_setup", { - "name": "localhost", - "url": "http://localhost", - "token": token, - "username": "admin", - "private_key": credential["id"], - }) - - assert "make sure that home directory for admin user on the remote system exists" in ve.value.errmsg - - -def test_remote_ssh_semiautomatic_setup_sets_user_attributes(credential): - with dataset("unpriv_homedir") as homedir: - with user({ - "username": "unpriv", - "full_name": "unpriv", - "group_create": True, - "home": f"/mnt/{homedir}", - "password_disabled": True, - "smb": False, - "shell": "/usr/sbin/nologin", - }): - token = call("auth.generate_token") - connection = call("keychaincredential.remote_ssh_semiautomatic_setup", { - "name": "localhost", - "url": "http://localhost", - "token": token, - "username": "unpriv", - "private_key": credential["id"], - }) - try: - call("replication.list_datasets", "SSH", connection["id"]) - finally: - call("keychaincredential.delete", connection["id"]) - - -def test_ssl_certificate_error(credential): - token = call("auth.generate_token") - with pytest.raises(CallError) as ve: - call("keychaincredential.remote_ssh_semiautomatic_setup", { - "name": "localhost", - # Should fail on default self-signed certificate - "url": "https://localhost", - "token": token, - "private_key": credential["id"], - }) - - assert ve.value.errno == CallError.ESSLCERTVERIFICATIONERROR - - -def test_ignore_ssl_certificate_error(credential): - token = call("auth.generate_token") - connection = call("keychaincredential.remote_ssh_semiautomatic_setup", { - "name": "localhost", - "url": "https://localhost", - "verify_ssl": False, - "token": token, - "private_key": credential["id"], - }) - call("keychaincredential.delete", connection["id"]) diff --git a/tests/api2/test_localhost_ws_auth.py b/tests/api2/test_localhost_ws_auth.py deleted file mode 100644 index 36cb392fef9a9..0000000000000 --- a/tests/api2/test_localhost_ws_auth.py +++ 
/dev/null @@ -1,11 +0,0 @@ -from middlewared.test.integration.utils import ssh - - -def test__authentication_required_localhost(): - cmd = 'midclt -u ws://localhost/websocket call user.query' - resp = ssh(cmd, check=False, complete_response=True) - - assert not resp['result'] - - assert 'Not authenticated' in resp['stderr'] - diff --git a/tests/api2/test_lock.py b/tests/api2/test_lock.py deleted file mode 100644 index 7cd43eb5e6859..0000000000000 --- a/tests/api2/test_lock.py +++ /dev/null @@ -1,67 +0,0 @@ -import time - -import pytest - -from middlewared.test.integration.utils import client, mock - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -def test_no_lock(): - with mock("test.test1", """ - from middlewared.service import lock - - async def mock(self, *args): - import asyncio - await asyncio.sleep(5) - """): - start = time.monotonic() - - with client() as c: - c1 = c.call("test.test1", background=True) - c2 = c.call("test.test1", background=True) - c.wait(c1) - c.wait(c2) - - assert time.monotonic() - start < 6 - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -def test_async_lock(): - with mock("test.test1", """ - from middlewared.service import lock - - @lock("test") - async def mock(self, *args): - import asyncio - await asyncio.sleep(5) - """): - start = time.monotonic() - - with client() as c: - c1 = c.call("test.test1", background=True) - c2 = c.call("test.test1", background=True) - c.wait(c1) - c.wait(c2) - - assert time.monotonic() - start >= 10 - - -@pytest.mark.flaky(reruns=5, reruns_delay=5) -def test_threading_lock(): - with mock("test.test1", """ - from middlewared.service import lock - - @lock("test") - def mock(self, *args): - import time - time.sleep(5) - """): - start = time.monotonic() - - with client() as c: - c1 = c.call("test.test1", background=True) - c2 = c.call("test.test1", background=True) - c.wait(c1) - c.wait(c2) - - assert time.monotonic() - start >= 10 diff --git a/tests/api2/test_network_configuration.py b/tests/api2/test_network_configuration.py deleted file mode 100644 index 02685c2e38791..0000000000000 --- a/tests/api2/test_network_configuration.py +++ /dev/null @@ -1,41 +0,0 @@ -from middlewared.test.integration.utils import call, ssh - -from auto_config import ha - -NEW_HOSTNAME = 'dummy123' - - -def fetch_hostname(): - name = ssh('hostname').strip() - if ha: - return name.removesuffix('-nodea').removesuffix('-nodeb') - return name - - -def config_read_hostname(): - config = call('network.configuration.config') - if ha: - return config['hostname_virtual'] - else: - return config['hostname'] - - -def config_set_hostname(name): - if ha: - payload = {'hostname': f'{name}-nodea', - 'hostname_b': f'{name}-nodeb', - 'hostname_virtual': name} - else: - payload = {'hostname': name} - call('network.configuration.update', payload) - - -def test_changing_hostname(): - current_hostname = config_read_hostname() - - config_set_hostname(NEW_HOSTNAME) - try: - assert fetch_hostname() == NEW_HOSTNAME - finally: - config_set_hostname(current_hostname) - assert fetch_hostname() == current_hostname diff --git a/tests/api2/test_nfs_share_crud_roles.py b/tests/api2/test_nfs_share_crud_roles.py deleted file mode 100644 index 2606c049aad5d..0000000000000 --- a/tests/api2/test_nfs_share_crud_roles.py +++ /dev/null @@ -1,39 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - 
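Two more helpers recur throughout the deleted tests: call() performs a single authenticated API call against the test appliance (waiting for job completion when job=True), while client() yields a persistent session that supports subscriptions and background calls. A small illustration, using the system.boot_id method that the rate-limit test further below also calls:

from middlewared.test.integration.utils import call, client


def example_sessions():
    boot_id = call("system.boot_id")       # one-shot call
    with client() as c:                    # persistent authenticated session
        assert c.call("system.boot_id") == boot_id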
common_checks(unprivileged_user_fixture, "sharing.nfs.query", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "nfs.client_count", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, False) - common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, False) - common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, False) - common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, False) - common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_NFS_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, True) - common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, True) - common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, True) - common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, True, valid_role_exception=False) - common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, True, valid_role_exception=False) - common_checks( - unprivileged_user_fixture, "service.start", role, True, method_args=["nfs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.restart", role, True, method_args=["nfs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.reload", role, True, method_args=["nfs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.stop", role, True, method_args=["nfs"], valid_role_exception=False - ) diff --git a/tests/api2/test_openapi.py b/tests/api2/test_openapi.py deleted file mode 100644 index 975d66a428185..0000000000000 --- a/tests/api2/test_openapi.py +++ /dev/null @@ -1,16 +0,0 @@ -import pytest -import sys -import os - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET - - - -@pytest.mark.parametrize('protocol,force_ssl', [('http', False), ('https', True)]) -def test_protocol_reported_correctly(protocol, force_ssl): - response = GET('', force_ssl=force_ssl) - server_urls = response.json()['servers'] - for url_dict in filter(lambda d: 'url' in d, server_urls): - assert url_dict['url'].startswith(protocol) is True, url_dict diff --git a/tests/api2/test_password_reset.py b/tests/api2/test_password_reset.py deleted file mode 100644 index 023686ce83818..0000000000000 --- a/tests/api2/test_password_reset.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 -import errno -import pytest -import secrets -import string - -from middlewared.service_exception import CallError, ValidationErrors -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.account import unprivileged_user -from middlewared.test.integration.utils import call, client -from middlewared.test.integration.utils.audit import expect_audit_method_calls - - -TEST_USERNAME = 'testpasswduser' -TEST_USERNAME_2 = 'testpasswduser2' -TEST_GROUPNAME = 'testpasswdgroup' -TEST_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -TEST_PASSWORD_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -TEST_PASSWORD2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i 
in range(10)) -TEST_PASSWORD2_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) -REDACTED = '********' - - -def test_restricted_user_set_password(): - with unprivileged_user( - username=TEST_USERNAME, - group_name=TEST_GROUPNAME, - privilege_name='TEST_PASSWD_RESET_PRIVILEGE', - allowlist=[], - web_shell=False, - roles=['READONLY_ADMIN'] - ) as acct: - with client(auth=(acct.username, acct.password)) as c: - payload = { - 'username': acct.username, - 'old_password': acct.password, - 'new_password': TEST_PASSWORD - } - - # Password reset using existing password and current user should work - with expect_audit_method_calls([{ - 'method': 'user.set_password', - 'params': [{ - 'username': acct.username, - 'old_password': REDACTED, - 'new_password': REDACTED - }], - 'description': f'Set account password {acct.username}', - }]): - c.call('user.set_password', payload) - - # Should be able to create new client session with new password - with client(auth=(acct.username, TEST_PASSWORD)) as c2: - c2.call('auth.me') - - # FULL_ADMIN privileges should also allow password reset: - call('user.set_password', { - 'username': acct.username, - 'old_password': TEST_PASSWORD, - 'new_password': TEST_PASSWORD_2 - }) - - # FULL_ADMIN should also be able to skip password checks - call('user.set_password', { - 'username': acct.username, - 'new_password': TEST_PASSWORD_2, - }) - - group_id = call('group.query', [['group', '=', TEST_GROUPNAME]], {'get': True})['id'] - - # Create additional user with READONLY privilege - with user({ - 'username': TEST_USERNAME_2, - 'full_name': TEST_USERNAME_2, - 'group_create': True, - 'groups': [group_id], - 'smb': False, - 'password': TEST_PASSWORD2 - }) as u: - with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2: - # Limited users should not be able to change other - # passwords of other users - with pytest.raises(CallError) as ve: - c2.call('user.set_password', { - 'username': acct.username, - 'old_password': TEST_PASSWORD_2, - 'new_password': 'CANARY' - }) - - assert ve.value.errno == errno.EPERM - - with pytest.raises(ValidationErrors) as ve: - # Limited users should not be able to skip password checks - c2.call('user.set_password', { - 'username': TEST_USERNAME_2, - 'new_password': 'CANARY', - }) - - call("user.update", u['id'], {'password_disabled': True}) - with pytest.raises(ValidationErrors) as ve: - # This should fail because we've disabled password auth - call('user.set_password', { - 'username': TEST_USERNAME_2, - 'old_password': TEST_PASSWORD2, - 'new_password': 'CANARY' - }) - - call("user.update", u['id'], { - 'password_disabled': False, - 'locked': True - }) - - with pytest.raises(ValidationErrors) as ve: - # This should fail because we've locked account - call('user.set_password', { - 'username': TEST_USERNAME_2, - 'old_password': TEST_PASSWORD2, - 'new_password': 'CANARY' - }) - - call("user.update", u['id'], { - 'password_disabled': False, - 'locked': False - }) - - # Unlocking user should allow password reset to succeed - with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2: - c2.call('user.set_password', { - 'username': TEST_USERNAME_2, - 'old_password': TEST_PASSWORD2, - 'new_password': TEST_PASSWORD2_2 - }) diff --git a/tests/api2/test_pool_attach.py b/tests/api2/test_pool_attach.py deleted file mode 100644 index cd0b039f82574..0000000000000 --- a/tests/api2/test_pool_attach.py +++ /dev/null @@ -1,26 +0,0 @@ -from middlewared.test.integration.assets.pool import another_pool -from 
middlewared.test.integration.utils import call, ssh - - -def test_attach_raidz1_vdev(): - with another_pool(topology=(6, lambda disks: { - "data": [ - { - "type": "RAIDZ1", - "disks": disks[0:3] - }, - { - "type": "RAIDZ1", - "disks": disks[3:6] - }, - ], - })) as pool: - disk = call("disk.get_unused")[0]["name"] - - call("pool.attach", pool["id"], { - "target_vdev": pool["topology"]["data"][0]["guid"], - "new_disk": disk, - }, job=True) - - pool = call("pool.get_instance", pool["id"]) - assert pool["expand"]["state"] == "FINISHED" diff --git a/tests/api2/test_pool_dataset_acl.py b/tests/api2/test_pool_dataset_acl.py deleted file mode 100644 index 948cef934d6b5..0000000000000 --- a/tests/api2/test_pool_dataset_acl.py +++ /dev/null @@ -1,104 +0,0 @@ -import dataclasses -import errno - -import pytest - -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from truenas_api_client import ClientException - - -@dataclasses.dataclass -class AclIds: - user_to_add: int = 8765309 - user2_to_add: int = 8765310 - group_to_add: int = 1138 - - -def check_for_entry(acl, id_type, xid, perms, is_posix=False): - has_entry = has_default = has_access = False - for ace in acl: - if ace['id'] == xid and ace['tag'] == id_type and ace['perms'] == perms: - if is_posix: - if ace['default']: - assert has_default is False - has_default = True - else: - assert has_access is False - has_access = True - - else: - assert has_entry is False - has_entry = True - - return has_entry or (has_access and has_default) - - -def test_simplified_apps_api_posix_acl(): - posix_acl = [ - {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'}, - {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'}, - {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'FULL_CONTROL'}, - ] - with dataset('APPS_POSIX') as ds: - ds_path = f'/mnt/{ds}' - call('filesystem.add_to_acl', {'path': ds_path, 'entries': posix_acl}, job=True) - acl = call('filesystem.getacl', ds_path)['acl'] - assert check_for_entry( - acl, - 'USER', - AclIds.user_to_add, - {'READ': True, 'WRITE': True, 'EXECUTE': True}, True - ), acl - assert check_for_entry( - acl, - 'GROUP', - AclIds.group_to_add, - {'READ': True, 'WRITE': False, 'EXECUTE': True}, True - ), acl - - -def test_simplified_apps_api_nfs4_acl(request): - nfs4_acl = [ - {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'}, - {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'}, - {'id_type': 'USER', 'id': AclIds.user2_to_add, 'access': 'FULL_CONTROL'}, - ] - with dataset('APPS_NFS4', {'share_type': 'APPS'}) as ds: - ds_path = f'/mnt/{ds}' - call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True) - acl = call('filesystem.getacl', ds_path)['acl'] - assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl - assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl - assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl - - # check behavior of using force option. 
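The pool asset helpers used heavily from here on are context managers: dataset() creates a child dataset of the primary test pool and yields its full name, destroying it on exit, and another_pool() builds a scratch pool from unused disks. A brief sketch of the lifecycle, assuming a configured test pool:

from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call


def example_dataset_lifecycle():
    with dataset("scratch") as ds:         # ds is "<pool>/scratch"
        assert call("pool.dataset.get_instance", ds)["name"] == ds
    # the dataset is destroyed once the context exits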
diff --git a/tests/api2/test_pool_dataset_acl.py b/tests/api2/test_pool_dataset_acl.py
deleted file mode 100644
index 948cef934d6b5..0000000000000
--- a/tests/api2/test_pool_dataset_acl.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import dataclasses
-import errno
-
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from truenas_api_client import ClientException
-
-
-@dataclasses.dataclass
-class AclIds:
-    user_to_add: int = 8765309
-    user2_to_add: int = 8765310
-    group_to_add: int = 1138
-
-
-def check_for_entry(acl, id_type, xid, perms, is_posix=False):
-    has_entry = has_default = has_access = False
-    for ace in acl:
-        if ace['id'] == xid and ace['tag'] == id_type and ace['perms'] == perms:
-            if is_posix:
-                if ace['default']:
-                    assert has_default is False
-                    has_default = True
-                else:
-                    assert has_access is False
-                    has_access = True
-
-            else:
-                assert has_entry is False
-                has_entry = True
-
-    return has_entry or (has_access and has_default)
-
-
-def test_simplified_apps_api_posix_acl():
-    posix_acl = [
-        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
-        {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
-        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'FULL_CONTROL'},
-    ]
-    with dataset('APPS_POSIX') as ds:
-        ds_path = f'/mnt/{ds}'
-        call('filesystem.add_to_acl', {'path': ds_path, 'entries': posix_acl}, job=True)
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(
-            acl,
-            'USER',
-            AclIds.user_to_add,
-            {'READ': True, 'WRITE': True, 'EXECUTE': True}, True
-        ), acl
-        assert check_for_entry(
-            acl,
-            'GROUP',
-            AclIds.group_to_add,
-            {'READ': True, 'WRITE': False, 'EXECUTE': True}, True
-        ), acl
-
-
-def test_simplified_apps_api_nfs4_acl(request):
-    nfs4_acl = [
-        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
-        {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
-        {'id_type': 'USER', 'id': AclIds.user2_to_add, 'access': 'FULL_CONTROL'},
-    ]
-    with dataset('APPS_NFS4', {'share_type': 'APPS'}) as ds:
-        ds_path = f'/mnt/{ds}'
-        call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
-        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
-        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl
-
-        # check behavior of using force option.
-        # presence of file in path should trigger failure if force is not set
-        results = ssh(f'touch {ds_path}/canary', complete_response=True)
-        assert results['result'] is True, results
-
-        acl_changed = call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
-
-        assert acl_changed is False
-
-        with pytest.raises(ClientException):
-            call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl + [
-                {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'},
-            ]}, job=True)
-
-        # check behavior of using force option.
-        # second call with `force` specified should succeed
-        acl_changed = call('filesystem.add_to_acl', {
-            'path': ds_path,
-            'entries': nfs4_acl + [{'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'}],
-            'options': {'force': True}
-        }, job=True)
-
-        assert acl_changed is True
-
-        # we already added the entry earlier.
-        # this check makes sure we're not adding duplicate entries.
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
-        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
-        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl
diff --git a/tests/api2/test_pool_dataset_create.py b/tests/api2/test_pool_dataset_create.py
deleted file mode 100644
index c7a729d12bb38..0000000000000
--- a/tests/api2/test_pool_dataset_create.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.parametrize("child", ["a/b", "a/b/c"])
-def test_pool_dataset_create_ancestors(child):
-    with dataset("ancestors_create_test") as test_ds:
-        name = f"{test_ds}/{child}"
-        call("pool.dataset.create", {"name": name, "create_ancestors": True})
-        call("pool.dataset.get_instance", name)
diff --git a/tests/api2/test_pool_dataset_details.py b/tests/api2/test_pool_dataset_details.py
deleted file mode 100644
index b38d19e97796c..0000000000000
--- a/tests/api2/test_pool_dataset_details.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.cloud_sync import local_ftp_task
-from middlewared.test.integration.assets.pool import dataset, pool
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.fixture(scope="module")
-def cloud_sync_fixture():
-    with dataset("test_pool_dataset_details") as test_ds:
-        with dataset("test_pool_dataset_details_other") as other_ds:
-            with local_ftp_task({
-                "path": f"/mnt/{pool}",
-            }) as task:
-                ssh(f"mkdir -p /mnt/{test_ds}/subdir")
-                ssh(f"mkdir -p /mnt/{other_ds}/subdir")
-                yield test_ds, other_ds, task
-
-
-@pytest.mark.parametrize("path,count", [
-    # A task that backs up the parent dataset backs up the child dataset too
-    (lambda test_ds, other_ds: f"/mnt/{pool}", 1),
-    # A task that backs up the dataself itself
-    (lambda test_ds, other_ds: f"/mnt/{test_ds}", 1),
-    # A task that backs up only the part of the dataset should not count
-    (lambda test_ds, other_ds: f"/mnt/{test_ds}/subdir", 0),
-    # Unrelated datasets should not count too
-    (lambda test_ds, other_ds: f"/mnt/{other_ds}", 0),
-    (lambda test_ds, other_ds: f"/mnt/{other_ds}/subdir", 0),
-])
-def test_cloud_sync(cloud_sync_fixture, path, count):
-    test_ds, other_ds, task = cloud_sync_fixture
-    call("cloudsync.update", task["id"], {"path": path(test_ds, other_ds)})
-
-    result = call("pool.dataset.details")
-    details = [
-        ds
-        for ds in result
-        if ds["name"] == test_ds
-    ][0]
-    assert details["cloudsync_tasks_count"] == count
diff --git a/tests/api2/test_pool_dataset_encrypted.py b/tests/api2/test_pool_dataset_encrypted.py
deleted file mode 100644
index d80069bfaabdf..0000000000000
--- a/tests/api2/test_pool_dataset_encrypted.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-PASSPHRASE = "12345678"
-
-
-def encryption_props():
-    return {
-        "encryption_options": {"generate_key": False, "passphrase": PASSPHRASE},
-        "encryption": True,
-        "inherit_encryption": False
-    }
-
-
-def test_delete_locked_dataset():
-    with dataset("test_delete_locked_dataset", encryption_props()) as ds:
-        call("pool.dataset.lock", ds, job=True)
-
-        with pytest.raises(CallError) as ve:
-            call("filesystem.stat", f"/mnt/{ds}")
-
-        assert ve.value.errno == errno.ENOENT
-
-
-def test_unencrypted_dataset_within_encrypted_dataset():
-    with dataset("test_pool_dataset_witin_encryted", encryption_props()) as ds:
-        with pytest.raises(ValidationErrors) as ve:
-            call("pool.dataset.create", {
-                "name": f"{ds}/child",
-                "encryption": False,
-                "inherit_encryption": False,
-            })
-
-        assert any(
-            "Cannot create an unencrypted dataset within an encrypted dataset" in error.errmsg
-            for error in ve.value.errors
-        ) is True, ve
diff --git a/tests/api2/test_pool_dataset_info.py b/tests/api2/test_pool_dataset_info.py
deleted file mode 100644
index 8d1b7c4cd60e4..0000000000000
--- a/tests/api2/test_pool_dataset_info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import pool
-
-
-def test_recommended_zvol_blocksize():
-    assert call("pool.dataset.recommended_zvol_blocksize", pool) == "16K"
diff --git a/tests/api2/test_pool_dataset_processes.py b/tests/api2/test_pool_dataset_processes.py
deleted file mode 100644
index 4817cb92f8510..0000000000000
--- a/tests/api2/test_pool_dataset_processes.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import another_pool
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-def test_empty_for_locked_root_dataset():
-    with another_pool({"encryption": True, "encryption_options": {"passphrase": "passphrase"}}):
-        call("pool.dataset.lock", "test", job=True)
-        assert call("pool.dataset.processes", "test") == []
diff --git a/tests/api2/test_pool_dataset_snapshot_count.py b/tests/api2/test_pool_dataset_snapshot_count.py
deleted file mode 100644
index b004d5f60d535..0000000000000
--- a/tests/api2/test_pool_dataset_snapshot_count.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import textwrap
-
-import pytest
-
-from middlewared.test.integration.utils import call, mock
-from middlewared.test.integration.assets.pool import dataset
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-def test_empty_for_locked_root_dataset():
-    with dataset("test_pool_dataset_snapshot_count") as ds:
-        for i in range(7):
-            call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
-
-        with mock("zfs.snapshot.query", textwrap.dedent("""\
-            def mock(self, *args):
-                raise Exception("Should not be called")
-        """)):
-            assert call("pool.dataset.snapshot_count", ds) == 7
diff --git a/tests/api2/test_pool_dataset_track_processes.py b/tests/api2/test_pool_dataset_track_processes.py
deleted file mode 100644
index 693896f860d9b..0000000000000
--- a/tests/api2/test_pool_dataset_track_processes.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import contextlib
-import time
-
-import pytest
-from pytest_dependency import depends
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import dataset, pool
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-@pytest.mark.parametrize("datasets,file_open_path,arg_path", [
-    # A file on a dataset
-    (
-        [('test', None)],
-        f'/mnt/{pool}/test/test_file',
-        lambda ssh: f'/mnt/{pool}/test',
-    ),
-    # zvol
-    (
-        [('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
-        f'/dev/zvol/{pool}/test',
-        lambda ssh: f'/dev/zvol/{pool}/test'
-    ),
-    # zvol with /dev/zd* path
-    (
-        [('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
-        f'/dev/zvol/{pool}/test',
-        lambda ssh: ssh(f'readlink -f /dev/zvol/{pool}/test').strip(),
-    ),
-    # A dataset with nested zvol
-    (
-        [
-            ('test', None),
-            ('test/zvol', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100}),
-        ],
-        f'/dev/zvol/{pool}/test/zvol',
-        lambda ssh: f'/dev/zvol/{pool}/test',
-    ),
-])
-def test__open_path_and_check_proc(request, datasets, file_open_path, arg_path):
-    with contextlib.ExitStack() as stack:
-        for name, data in datasets:
-            stack.enter_context(dataset(name, data))
-
-        opened = False
-        try:
-            test_file = file_open_path
-            open_pid = ssh(f"""python -c 'import time; f = open("{test_file}", "w+"); time.sleep(10)' > /dev/null 2>&1 & echo $!""")
-            open_pid = open_pid.strip()
-            assert open_pid.isdigit(), f'{open_pid!r} is not a digit'
-            opened = True
-
-            # spinning up python interpreter could take some time on busy system so sleep
-            # for a couple seconds to give it time
-            time.sleep(2)
-
-            # what the cmdline output is formatted to
-            cmdline = f"""python -c import time; f = open(\"{test_file}\", \"w+\"); time.sleep(10)"""
-
-            # have to use websocket since the method being called is private
-            res = call('pool.dataset.processes_using_paths', [arg_path(ssh)])
-            assert len(res) == 1
-
-            result = res[0]
-            assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
-            assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
-            assert 'paths' not in result
-
-            res = call('pool.dataset.processes_using_paths', [arg_path(ssh)], True)
-            assert len(res) == 1
-            result = res[0]
-            assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
-            assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
-            assert 'paths' in result
-            assert len(result['paths']) == 1
-            assert result['paths'][0] == test_file if test_file.startswith('/mnt') else '/dev/zd0'
-
-        finally:
-            if opened:
-                ssh(f'kill -9 {open_pid}', check=False)
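ssh() runs a shell command on the target machine and returns its stdout; complete_response=True switches the return to a dict (the tests above read its 'result' and 'stderr' keys), and check=False tolerates a non-zero exit status. A small usage sketch; the 'stdout' key shown here is an assumption inferred from the helper's pattern rather than something this patch demonstrates:

from middlewared.test.integration.utils import ssh


def example_ssh_usage():
    assert ssh("echo canary") == "canary\n"          # stdout by default
    resp = ssh("false", check=False, complete_response=True)
    assert resp["result"] is False                   # command failed, no raise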
diff --git a/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py b/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py
deleted file mode 100644
index 1572b39c3a405..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-PASSPHRASE = '12345678'
-
-
-def encryption_props():
-    return {
-        'encryption_options': {'generate_key': False, 'passphrase': PASSPHRASE},
-        'encryption': True,
-        'inherit_encryption': False
-    }
-
-
-def test_lock_sets_immutable_flag():
-    with dataset('parent', encryption_props()) as parent_ds:
-        with dataset('parent/child', encryption_props()) as child_ds:
-            child_ds_mountpoint = os.path.join('/mnt', child_ds)
-            assert call('filesystem.is_immutable', child_ds_mountpoint) is False, child_ds_mountpoint
-            call('pool.dataset.lock', child_ds, job=True)
-            assert call('filesystem.is_immutable', child_ds_mountpoint) is True, child_ds_mountpoint
-
-        parent_mountpoint = os.path.join('/mnt', parent_ds)
-        assert call('filesystem.is_immutable', parent_mountpoint) is False, parent_mountpoint
-        call('pool.dataset.lock', parent_ds, job=True)
-        assert call('filesystem.is_immutable', parent_mountpoint) is True, parent_mountpoint
-
-
-def test_unlock_unsets_immutable_flag():
-    with dataset('parent', encryption_props()) as parent_ds:
-        parent_mountpoint = os.path.join('/mnt', parent_ds)
-        with dataset('parent/child', encryption_props()) as child_ds:
-            child_ds_mountpoint = os.path.join('/mnt', child_ds)
-            call('pool.dataset.lock', parent_ds, job=True)
-            assert call('filesystem.is_immutable', parent_mountpoint) is True, parent_mountpoint
-
-            call('pool.dataset.unlock', parent_ds, {
-                'datasets': [{'name': parent_ds, 'passphrase': PASSPHRASE}, {'name': child_ds, 'passphrase': 'random'}],
-                'recursive': True,
-            }, job=True)
-            assert call('filesystem.is_immutable', parent_mountpoint) is False, parent_mountpoint
-            assert call('filesystem.is_immutable', child_ds_mountpoint) is True, child_ds_mountpoint
-            call('pool.dataset.unlock', child_ds, {
-                'datasets': [{'name': child_ds, 'passphrase': PASSPHRASE}],
-            }, job=True)
-            assert call('filesystem.is_immutable', child_ds_mountpoint) is False, child_ds_mountpoint
diff --git a/tests/api2/test_pool_dataset_unlock_recursive.py b/tests/api2/test_pool_dataset_unlock_recursive.py
deleted file mode 100644
index 66a953d7da291..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_recursive.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import pool
-
-
-def test_pool_dataset_unlock_recursive():
-    key = "0" * 32
-    try:
-        ssh(f"echo -n '{key}' > /tmp/key")
-        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test")
-        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test/nested")
-        ssh(f"echo TEST > /mnt/{pool}/test/nested/file")
-        ssh("rm /tmp/key")
-        ssh(f"zfs set readonly=on {pool}/test")
-        ssh(f"zfs set readonly=on {pool}/test/nested")
-        ssh(f"zfs unmount {pool}/test")
-        ssh(f"zfs unload-key -r {pool}/test")
-
-        result = call("pool.dataset.unlock", f"{pool}/test", {
-            "recursive": True,
-            "datasets": [
-                {
-                    "name": f"{pool}/test",
-                    "key": key.encode("ascii").hex(),
-                    "recursive": True,
-                },
-            ],
-        }, job=True)
-        assert not result["failed"]
-
-        assert not call("pool.dataset.get_instance", f"{pool}/test")["locked"]
-        assert not call("pool.dataset.get_instance", f"{pool}/test/nested")["locked"]
-
-        # Ensure the child dataset is mounted
-        assert ssh(f"cat /mnt/{pool}/test/nested/file") == "TEST\n"
-
-        # Ensure the keys are stored in the database to be able to unlock the datasets after reboot
-        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test"]])
-        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test/nested"]])
-    finally:
-        call("pool.dataset.delete", f"{pool}/test", {"recursive": True})
diff --git a/tests/api2/test_pool_dataset_unlock_restart_vms.py b/tests/api2/test_pool_dataset_unlock_restart_vms.py
deleted file mode 100644
index b0dca80e2fdba..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_restart_vms.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, mock, ssh
-
-
-PASSPHRASE = "12345678"
-
-
-def encryption_props():
-    return {
-        "encryption_options": {"generate_key": False, "passphrase": PASSPHRASE},
-        "encryption": True,
-        "inherit_encryption": False
-    }
-
-
-@pytest.mark.parametrize("zvol", [True, False])
-def test_restart_vm_on_dataset_unlock(zvol):
-    if zvol:
-        data = {"type": "VOLUME", "volsize": 1048576}
-    else:
-        data = {}
-
-    with dataset("test", {**data, **encryption_props()}) as ds:
-        call("pool.dataset.lock", ds, job=True)
-
-        if zvol:
-            device = {"dtype": "DISK", "attributes": {"path": f"/dev/zvol/{ds}"}}
-        else:
-            device = {"dtype": "RAW", "attributes": {"path": f"/mnt/{ds}/child"}}
-
-        with mock("vm.query", return_value=[{"id": 1, "devices": [device]}]):
-            with mock("vm.status", return_value={"state": "RUNNING"}):
-                ssh("rm -f /tmp/test-vm-stop")
-                with mock("vm.stop", """
-                    from middlewared.service import job
-
-                    @job()
-                    def mock(self, job, *args):
-                        with open("/tmp/test-vm-stop", "w") as f:
-                            pass
-                """):
-                    ssh("rm -f /tmp/test-vm-start")
-                    with mock("vm.start", declaration="""
-                        def mock(self, job, *args):
-                            with open("/tmp/test-vm-start", "w") as f:
-                                pass
-                    """):
-                        call(
-                            "pool.dataset.unlock",
-                            ds,
-                            {"datasets": [{"name": ds, "passphrase": PASSPHRASE}]},
-                            job=True,
-                        )
-
-                        call("filesystem.stat", "/tmp/test-vm-stop")
-                        call("filesystem.stat", "/tmp/test-vm-start")
diff --git a/tests/api2/test_pool_expand.py b/tests/api2/test_pool_expand.py
deleted file mode 100644
index 05af372ccf674..0000000000000
--- a/tests/api2/test_pool_expand.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import time
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def retry_get_parts_on_disk(disk, max_tries=10):
-    for i in range(max_tries):
-        if parts := call('disk.list_partitions', disk):
-            return parts
-        time.sleep(1)
-    else:
-        assert False, f'Failed after {max_tries} seconds for partition info on {disk!r}'
-
-
-def test_expand_pool():
-    with another_pool() as pool:
-        disk = pool["topology"]["data"][0]["disk"]
-        original_partition_size = call("disk.list_partitions", disk)[-1]["size"]
-        # Ensure that the test pool vdev is way larger than 2 GiB
-        assert original_partition_size > 2147483648 * 2
-
-        # Transform this pool into a pool on a vdev with a partition that is only 2 GiB
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"sgdisk -d 1 /dev/{disk}")
-        ssh(f"sgdisk -n 1:0:+2GiB -t 1:BF01 /dev/{disk}")
-        small_partition = retry_get_parts_on_disk(disk)[-1]
-        assert small_partition["size"] < 2147483648 * 1.01
-        device = "disk/by-partuuid/" + small_partition["partition_uuid"]
-        ssh(f"zpool create {pool['name']} -o altroot=/mnt -f {device}")
-        # Ensure that the pool size is small now
-        assert call("pool.get_instance", pool["id"])["size"] < 2147483648 * 1.01
-        ssh(f"touch /mnt/{pool['name']}/test")
-        call("pool.expand", pool["id"], job=True)
-
-        new_partition = call("disk.list_partitions", disk)[-1]
-        # Ensure that the partition size is way larger than 2 GiB
-        assert new_partition["size"] > 2147483648 * 2
-        # Ensure that the pool size was increased
-        assert call("pool.get_instance", pool["id"])["size"] > 2147483648 * 2
-        # Ensure that data was not destroyed
-        assert ssh(f"ls /mnt/{pool['name']}") == "test\n"
-
-
-def test_expand_partition_keeps_initial_offset():
-    disk = call("disk.get_unused")[0]["name"]
-    call("disk.wipe", disk, "QUICK", job=True)
-    ssh(f"sgdisk -n 0:8192:1GiB /dev/{disk}")
-    partition = retry_get_parts_on_disk(disk)[0]
-    call("pool.expand_partition", partition)
-    expanded_partition = retry_get_parts_on_disk(disk)[0]
-    assert expanded_partition["size"] > partition["size"]
-    assert expanded_partition["start"] == partition["start"]
diff --git a/tests/api2/test_pool_export.py b/tests/api2/test_pool_export.py
deleted file mode 100644
index 229c43401e4f4..0000000000000
--- a/tests/api2/test_pool_export.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import pytest
-
-from truenas_api_client import ClientException
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, mock
-
-import os
-import sys
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import PUT
-from auto_config import pool_name, ha
-
-
-def test_systemdataset_migrate_error(request):
-    """
-    On HA this test will fail with the error below if failover is enabled:
-    [ENOTSUP] Disable failover before exporting last pool on system.
-    """
-
-    # Disable Failover
-    if ha is True:
-        results = PUT('/failover/', {"disabled": True, "master": True})
-        assert results.status_code == 200, results.text
-
-    pool = call("pool.query", [["name", "=", pool_name]], {"get": True})
-
-    with mock("systemdataset.update", """\
-        from middlewared.service import job, CallError
-
-        @job()
-        def mock(self, job, *args):
-            raise CallError("Test error")
-    """):
-        with pytest.raises(ClientException) as e:
-            call("pool.export", pool["id"], job=True)
-
-        assert e.value.error == (
-            "[EFAULT] This pool contains system dataset, but its reconfiguration failed: [EFAULT] Test error"
-        )
-
-    # Enable back Failover.
-    if ha is True:
-        results = PUT('/failover/', {"disabled": False, "master": True})
-        assert results.status_code == 200, results.text
-
-
-def test_destroy_offline_disks():
-    with another_pool(topology=(2, lambda disks: {
-        "data": [
-            {"type": "MIRROR", "disks": disks[0:2]},
-        ],
-    })) as pool:
-        disk = pool["topology"]["data"][0]["children"][0]
-
-        call("pool.offline", pool["id"], {"label": disk["guid"]})
-
-        call("pool.export", pool["id"], {"destroy": True}, job=True)
-
-        unused = [unused for unused in call("disk.get_unused") if unused["name"] == disk["disk"]][0]
-
-        assert unused["exported_zpool"] is None
diff --git a/tests/api2/test_pool_is_upgraded.py b/tests/api2/test_pool_is_upgraded.py
deleted file mode 100644
index 19d9050ad2b11..0000000000000
--- a/tests/api2/test_pool_is_upgraded.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool, pool
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.fixture(scope="module")
-def outdated_pool():
-    with another_pool() as pool:
-        device = pool["topology"]["data"][0]["path"]
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"zpool create {pool['name']} -o altroot=/mnt -o feature@sha512=disabled -f {device}")
-        yield pool
-
-
-def test_is_upgraded():
-    pool_id = call("pool.query", [["name", "=", pool]])[0]["id"]
-    assert call("pool.is_upgraded", pool_id)
-
-
-def test_is_outdated(outdated_pool):
-    assert call("pool.is_upgraded", outdated_pool["id"]) is False
-
-
-def test_is_outdated_in_list(outdated_pool):
-    pool = call("pool.query", [["id", "=", outdated_pool["id"]]], {"extra": {"is_upgraded": True}})[0]
-    assert pool["is_upgraded"] is False
-
-
-# Flaky as one-shot alert creation might be delayed until `alert.process_alerts` completion.
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_is_outdated_alert(outdated_pool):
-    alerts = call("alert.list")
-    assert any((i["klass"] == "PoolUpgraded" and i["args"] == outdated_pool["name"] for i in alerts))
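Note how the recursive-unlock test above passes the raw ZFS key as hex text via key.encode("ascii").hex(): pool.dataset.unlock takes hex-encoded keys, and the round trip is straightforward to verify.

key = "0" * 32                       # 32-byte raw key, as in the test
hex_key = key.encode("ascii").hex()  # what gets sent over the API
assert bytes.fromhex(hex_key).decode("ascii") == key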
diff --git a/tests/api2/test_pool_is_upgraded_alert_removal.py b/tests/api2/test_pool_is_upgraded_alert_removal.py
deleted file mode 100644
index 3c43395c6ef4f..0000000000000
--- a/tests/api2/test_pool_is_upgraded_alert_removal.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import contextlib
-import time
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def assert_has_outdated_pool_alert(pool_name, has):
-    for i in range(60):
-        alerts = call("alert.list")
-        if any((i["klass"] == "PoolUpgraded" and i["args"] == pool_name for i in alerts)) == has:
-            break
-
-        time.sleep(1)
-    else:
-        assert False, alerts
-
-
-@contextlib.contextmanager
-def outdated_pool():
-    with another_pool() as pool:
-        device = pool["topology"]["data"][0]["path"]
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"zpool create test -o altroot=/mnt -o feature@sha512=disabled -f {device}")
-        assert_has_outdated_pool_alert(pool["name"], True)
-        yield pool
-
-
-def test_outdated_pool_alert_removed_on_pool_upgrade():
-    with outdated_pool() as pool:
-        call("pool.upgrade", pool["id"])
-
-    assert_has_outdated_pool_alert(pool["name"], False)
-
-
-def test_outdated_pool_alert_removed_on_pool_delete():
-    with outdated_pool() as pool:
-        pass
-
-    assert_has_outdated_pool_alert(pool["name"], False)
diff --git a/tests/api2/test_pool_remove_disk.py b/tests/api2/test_pool_remove_disk.py
deleted file mode 100644
index 38ca637054d9a..0000000000000
--- a/tests/api2/test_pool_remove_disk.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_waits_for_device_removal():
-    with another_pool(topology=(4, lambda disks: {
-        "data": [
-            {"type": "MIRROR", "disks": disks[0:2]},
-            {"type": "MIRROR", "disks": disks[2:4]}
-        ],
-    })) as pool:
-        ssh(f"dd if=/dev/urandom of=/mnt/{pool['name']}/blob bs=1M count=1000")
-        call("pool.remove", pool["id"], {"label": pool["topology"]["data"][0]["guid"]}, job=True)
diff --git a/tests/api2/test_pool_replace_disk.py b/tests/api2/test_pool_replace_disk.py
deleted file mode 100644
index 2c63b02c75e75..0000000000000
--- a/tests/api2/test_pool_replace_disk.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from time import sleep
-
-import pytest
-
-from middlewared.test.integration.assets.pool import _2_disk_mirror_topology, _4_disk_raidz2_topology, another_pool
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.parametrize("topology", [_2_disk_mirror_topology, _4_disk_raidz2_topology])
-def test_pool_replace_disk(topology):
-    """This tests the following:
-    1. create a zpool based on the `topology`
-    2. flatten the newly created zpools topology
-    3. verify the zpool vdev size matches reality
-    4. choose 1st vdev from newly created zpool
-    5. choose 1st disk in vdev from step #4
-    6. choose 1st disk in disk.get_unused as replacement disk
-    7. call pool.replace using disk from step #5 with disk from step #6
-    8. validate that the disk being replaced still has zfs partitions
-    9. validate pool.get_instance topology info shows the replacement disk
-    10. validate disk.get_instance associates the replacement disk with the zpool
-    """
-    with another_pool(topology=topology) as pool:  # step 1
-        # step 2
-        flat_top = call("pool.flatten_topology", pool["topology"])
-        pool_top = [vdev for vdev in flat_top if vdev["type"] == "DISK"]
-        # step 3
-        assert len(pool_top) == topology[0]
-
-        # step 4
-        to_replace_vdev = pool_top[0]
-        # step 5
-        to_replace_disk = call(
-            "disk.query", [["devname", "=", to_replace_vdev["disk"]]], {"get": True, "extra": {"pools": True}}
-        )
-        assert to_replace_disk["pool"] == pool["name"]
-
-        # step 6
-        new_disk = call("disk.get_unused")[0]
-
-        # step 7
-        call("pool.replace", pool["id"], {"label": to_replace_vdev["guid"], "disk": new_disk["identifier"]}, job=True)
-
-        # step 8
-        assert call("disk.gptid_from_part_type", to_replace_disk["devname"], call("disk.get_zfs_part_type"))
-
-        # step 9
-        found = False
-        for _ in range(10):
-            if not found:
-                for i in call("pool.flatten_topology", call("pool.get_instance", pool["id"])["topology"]):
-                    if i["type"] == "DISK" and i["disk"] == new_disk["devname"]:
-                        found = True
-                        break
-                else:
-                    sleep(1)
-
-        assert found, f'Failed to detect replacement disk {new_disk["devname"]!r} in zpool {pool["name"]!r}'
-
-        # step 10 (NOTE: disk.sync_all takes awhile so we retry a few times here)
-        for _ in range(30):
-            cmd = ("disk.get_instance", new_disk["identifier"], {"extra": {"pools": True}})
-            if call(*cmd)["pool"] == pool["name"]:
-                break
-            else:
-                sleep(1)
-        else:
-            assert False, f"{' '.join(cmd)} failed to update with pool information"
diff --git a/tests/api2/test_pool_resilver.py b/tests/api2/test_pool_resilver.py
deleted file mode 100644
index 43bb45a58587c..0000000000000
--- a/tests/api2/test_pool_resilver.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_pool_resilver_update():
-    resilver = {
-        "enabled": False,
-        "begin": "18:00",
-        "end": "09:00",
-        "weekday": [1, 2, 3, 4, 5, 6, 7],
-    }
-
-    assert call("pool.resilver.update", resilver).items() > resilver.items()
diff --git a/tests/api2/test_pool_spare.py b/tests/api2/test_pool_spare.py
deleted file mode 100644
index ff128da74423a..0000000000000
--- a/tests/api2/test_pool_spare.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import pytest
-
-from truenas_api_client import ValidationErrors
-from middlewared.test.integration.assets.disk import fake_disks
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-
-
-def test_pool_create_too_small_spare():
-    disk = call("disk.get_unused")[0]["name"]
-
-    with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
-        with pytest.raises(ValidationErrors) as ve:
-            pool = call("pool.create", {
-                "name": "test",
-                "encryption": False,
-                "allow_duplicate_serials": True,
-                "topology": {
-                    "data": [
-                        {"type": "STRIPE", "disks": [disk]},
-                    ],
-                    "spares": ["sdz"],
-                },
-            }, job=True)
-            call("pool.export", pool["id"], job=True)
-
-    assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
-
-
-def test_pool_update_too_small_spare():
-    with another_pool() as pool:
-        with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
-            with pytest.raises(ValidationErrors) as ve:
-                call("pool.update", pool["id"], {
-                    "topology": {
-                        "spares": ["sdz"],
-                    },
-                }, job=True)
-
-    assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
diff --git a/tests/api2/test_port_delegates.py b/tests/api2/test_port_delegates.py
deleted file mode 100644
index a107c8814527e..0000000000000
--- a/tests/api2/test_port_delegates.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import pytest
-import sys
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-
-
-PAYLOAD = (
-    ('ftp.config', 'ftp.update', ['port'], {}),
-)
-
-
-@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
-def test_port_delegate_validation_with_invalid_ports(config_method, method, keys, payload):
-    in_use_ports = []
-    namespace = config_method.rsplit('.', 1)[0]
-    for entry in call('port.get_in_use'):
-        in_use_ports.extend(filter(lambda i: i[1] > 1024 and entry['namespace'] != namespace, entry['ports']))
-
-    assert in_use_ports != [], 'No in use ports retrieved'
-
-    for index, key in enumerate(keys):
-        payload[key] = in_use_ports[index][1] if len(in_use_ports) > index else in_use_ports[0]
-
-    with pytest.raises(ValidationErrors) as ve:
-        call(method, payload)
-
-    assert any('The port is being used by' in error.errmsg for error in ve.value.errors) is True, ve
-
-
-@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
-def test_port_delegate_validation_with_valid_ports(config_method, method, keys, payload):
-    in_use_ports = []
-    for entry in call('port.get_in_use'):
-        in_use_ports.extend(entry['ports'])
-
-    assert in_use_ports != [], 'No in use ports retrieved'
-
-    validation_error = None
-    old_config = call(config_method)
-    to_restore_config = {}
-    used_ports = []
-    for key in keys:
-        port = next(i for i in range(20000, 60000) if i not in in_use_ports and i not in used_ports)
-        payload[key] = port
-        used_ports.append(port)
-        to_restore_config[key] = old_config[key]
-
-    try:
-        call(method, payload)
-    except ValidationErrors as ve:
-        validation_error = ve
-    else:
-        call(method, to_restore_config)
-
-    assert validation_error is None, f'No validation exception expected: {validation_error}'
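The quota tests that follow exercise pool.dataset.set_quota, whose payload is a list of entries, each naming a quota type, a subject id (a user or group name, or 'QUOTA'/'REFQUOTA' for dataset-wide limits), and a value in bytes. A minimal sketch of a call, with a hypothetical user name:

from middlewared.test.integration.utils import call


def example_set_quota(ds_name):
    # Limit a (hypothetical) user to ~1 MB and cap the dataset at ~2 MB.
    call("pool.dataset.set_quota", ds_name, [
        {"quota_type": "USER", "id": "someuser", "quota_value": 1000000},
        {"quota_type": "DATASET", "id": "QUOTA", "quota_value": 2000000},
    ])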
(['GROUP', 'group quota on gid']), - (['GROUPOBJ', 'groupobj quota on gid']), - ], - ids=[ - 'root USER quota is prohibited', - 'root USEROBJ quota is prohibited', - 'root GROUP quota is prohibited', - 'root GROUPOBJ quota is prohibited', - ], -) -def test_error(temp_ds, id_, quota_type, error): - """Changing any quota type for the root user/group should be prohibited""" - with pytest.raises(ValidationErrors) as ve: - call('pool.dataset.set_quota', temp_ds, [{'quota_type': quota_type, 'id': id_, 'quota_value': 5242880}]) - assert ve.value.errors[0].errmsg == f'Setting {error} [0] is not permitted' - - -def test_quotas(temp_ds, temp_user): - user, uid = temp_user['user'], temp_user['uid'] - group, gid = temp_user['group'], temp_user['gid'] - uq_value = QuotaConfig.uq_value - gq_value = QuotaConfig.gq_value - dq_value = QuotaConfig.dq_value - drq_value = QuotaConfig.drq_value - - call('pool.dataset.set_quota', temp_ds, [ - {'quota_type': 'USER', 'id': user, 'quota_value': uq_value}, - {'quota_type': 'USEROBJ', 'id': user, 'quota_value': uq_value}, - {'quota_type': 'GROUP', 'id': group, 'quota_value': gq_value}, - {'quota_type': 'GROUPOBJ', 'id': group, 'quota_value': gq_value}, - {'quota_type': 'DATASET', 'id': 'QUOTA', 'quota_value': dq_value}, - {'quota_type': 'DATASET', 'id': 'REFQUOTA', 'quota_value': drq_value}, - ]) - - verify_info = ( - ( - { - 'quota_type': 'USER', - 'id': uid, - 'quota': uq_value, - 'obj_quota': uq_value, - 'name': user - }, - 'USER', - ), - ( - { - 'quota_type': 'GROUP', - 'id': gid, - 'quota': gq_value, - 'obj_quota': gq_value, - 'name': group - }, - 'GROUP', - ), - ( - { - 'quota_type': 'DATASET', - 'id': temp_ds, - 'name': temp_ds, - 'quota': dq_value, - 'refquota': drq_value, - }, - 'DATASET', - ), - ) - for er, quota_type in verify_info: - for result in filter(lambda x: x['id'] == er['id'], call('pool.dataset.get_quota', temp_ds, quota_type)): - assert all((result[j] == er[j] for j in er)), result diff --git a/tests/api2/test_rate_limit.py b/tests/api2/test_rate_limit.py deleted file mode 100644 index c3419dcd9897a..0000000000000 --- a/tests/api2/test_rate_limit.py +++ /dev/null @@ -1,56 +0,0 @@ -import errno - -import pytest -from pytest_dependency import depends - -from middlewared.test.integration.utils import call, client - -NOAUTH_METHOD = 'system.boot_id' -SEP = '_##_' - - -@pytest.mark.dependency(name='rate_limited') -def test_unauth_requests_are_rate_limited(): - """Test that the truenas server rate limits a caller - that is hammering an endpoint that requires no authentication.""" - with client(auth=None) as c: - for i in range(1, 22): - if i <= 20: - # default is 20 calls within 60 second timeframe - assert c.call(NOAUTH_METHOD) - else: - with pytest.raises(Exception) as ve: - # on 21st call within 60 seconds, rate limit kicks in - c.call(NOAUTH_METHOD) - assert ve.value.errno == errno.EBUSY - - -def test_rate_limit_global_cache_entries(request): - """Test that middleware's rate limit plugin for interacting - with the global cache behaves as intended.""" - depends(request, ['rate_limited']) - cache = call('rate.limit.cache_get') - # the mechanism by which the rate limit chooses a unique key - # for inserting into the dictionary is by using the api endpoint - # name as part of the string - assert any((NOAUTH_METHOD in i for i in cache)), cache - - # now let's pop the last entry of the cache - len_cache_before_pop = len(cache) - popped_method, popped_ip = list(cache)[-1].split(SEP) - call('rate.limit.cache_pop', popped_method, popped_ip) - new_cache = 
call('rate.limit.cache_get') - assert len(new_cache) != len_cache_before_pop, new_cache - - # finally, let's clear the cache - call('rate.limit.cache_clear') - new_new_cache = call('rate.limit.cache_get') - assert len(new_new_cache) == 0, new_new_cache - - -def test_auth_requests_are_not_rate_limited(): - """Test that the truenas server does NOT rate limit a caller - that hammers an endpoint when said caller has been authenticated - and that method requires authentication.""" - for i in range(1, 22): - assert call('system.host_id') diff --git a/tests/api2/test_replication.py b/tests/api2/test_replication.py deleted file mode 100644 index 1b6e49c635f94..0000000000000 --- a/tests/api2/test_replication.py +++ /dev/null @@ -1,243 +0,0 @@ -import contextlib -import random -import string - -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.keychain import localhost_ssh_credentials -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.replication import replication_task -from middlewared.test.integration.assets.snapshot_task import snapshot_task -from middlewared.test.integration.utils import call, pool, ssh - - -BASE_REPLICATION = { - "direction": "PUSH", - "transport": "LOCAL", - "source_datasets": ["data"], - "target_dataset": "data", - "recursive": False, - "auto": False, - "retention_policy": "NONE", -} - - -@pytest.fixture(scope="module") -def ssh_credentials(): - with localhost_ssh_credentials(username="root") as c: - yield c - - -@pytest.fixture(scope="module") -def periodic_snapshot_tasks(): - result = {} - with contextlib.ExitStack() as stack: - for k, v in { - "data-recursive": { - "dataset": "tank/data", - "recursive": True, - "lifetime_value": 1, - "lifetime_unit": "WEEK", - "naming_schema": "auto-%Y%m%d.%H%M%S-1w", - "schedule": {}, - }, - "data-work-nonrecursive": { - "dataset": "tank/data/work", - "recursive": False, - "lifetime_value": 1, - "lifetime_unit": "WEEK", - "naming_schema": "auto-%Y%m%d.%H%M%S-1w", - "schedule": {}, - }, - - "exclude": { - "dataset": "tank/exclude", - "recursive": True, - "exclude": ["tank/exclude/work/garbage"], - "lifetime_value": 1, - "lifetime_unit": "WEEK", - "naming_schema": "snap-%Y%m%d-%H%M-1w", - "schedule": {}, - }, - }.items(): - stack.enter_context(dataset(v["dataset"].removeprefix("tank/"))) - result[k] = stack.enter_context(snapshot_task(v)) - - yield result - - -@pytest.mark.parametrize("req,error", [ - # Push + naming-schema - (dict(naming_schema=["snap-%Y%m%d-%H%M-1m"]), "naming_schema"), - - # Auto with both periodic snapshot task and schedule - (dict(periodic_snapshot_tasks=["data-recursive"], schedule={"minute": "*/2"}, auto=True), None), - # Auto with periodic snapshot task - (dict(periodic_snapshot_tasks=["data-recursive"], auto=True), None), - # Auto with schedule - (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], schedule={"minute": "*/2"}, auto=True), None), - # Auto without periodic snapshot task or schedule - (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], auto=True), "auto"), - - # Pull + periodic snapshot tasks - (dict(direction="PULL", periodic_snapshot_tasks=["data-recursive"]), "periodic_snapshot_tasks"), - # Pull with naming schema - (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"]), None), - # Pull + also_include_naming_schema - (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"]), - 
"also_include_naming_schema"), - # Pull + hold_pending_snapshots - (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], hold_pending_snapshots=True), - "hold_pending_snapshots"), - - # SSH+Netcat - (dict(periodic_snapshot_tasks=["data-recursive"], - transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=1024, - netcat_active_side_port_max=50000), - None), - # Bad netcat_active_side_port_max - (dict(transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=60000, - netcat_active_side_port_max=50000), - "netcat_active_side_port_max"), - # SSH+Netcat + compression - (dict(transport="SSH+NETCAT", compression="LZ4"), "compression"), - # SSH+Netcat + speed limit - (dict(transport="SSH+NETCAT", speed_limit=1024), "speed_limit"), - - # Does not exclude garbage - (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True), "exclude"), - # Does not exclude garbage - (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True, - exclude=["tank/exclude/work/garbage"]), - None), - # May not exclude if not recursive - (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False), None), - # Can't replicate excluded dataset - (dict(source_datasets=["tank/exclude/work/garbage"], periodic_snapshot_tasks=["exclude"]), - "source_datasets.0"), - - # Non-recursive exclude - (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False, - exclude=["tank/exclude/work/garbage"]), - "exclude"), - - # Unrelated exclude - (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True, - exclude=["tank/data"]), - "exclude.0"), - - # Does not require unrelated exclude - (dict(source_datasets=["tank/exclude/work/important"], periodic_snapshot_tasks=["exclude"], recursive=True), - None), - - # Custom retention policy - (dict(periodic_snapshot_tasks=["data-recursive"], - retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), None), - - # Complex custom retention policy - (dict(periodic_snapshot_tasks=["data-recursive"], - retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK", lifetimes=[ - dict(schedule={"hour": "0"}, lifetime_value=30, lifetime_unit="DAY"), - dict(schedule={"hour": "0", "dow": "1"}, lifetime_value=1, lifetime_unit="YEAR"), - ]), None), - - # name_regex - (dict(name_regex="manual-.+"), None), - (dict(direction="PULL", name_regex="manual-.+"), None), - (dict(name_regex="manual-.+", - retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), "retention_policy"), - - # replicate - (dict(source_datasets=["tank/data", "tank/data/work"], periodic_snapshot_tasks=["data-recursive"], replicate=True, - recursive=True, properties=True), - "source_datasets.1"), - (dict(source_datasets=["tank/data"], periodic_snapshot_tasks=["data-recursive", "data-work-nonrecursive"], - replicate=True, recursive=True, properties=True), - "periodic_snapshot_tasks.1"), -]) -def test_create_replication(ssh_credentials, periodic_snapshot_tasks, req, error): - if "ssh_credentials" in req: - req["ssh_credentials"] = ssh_credentials["credentials"]["id"] - - if "periodic_snapshot_tasks" in req: - req["periodic_snapshot_tasks"] = [periodic_snapshot_tasks[k]["id"] for k in req["periodic_snapshot_tasks"]] - - name = "".join(random.choice(string.ascii_letters) for _ in range(64)) - data = dict(BASE_REPLICATION, name=name, **req) - - 
if error: - with pytest.raises(ValidationErrors) as ve: - with replication_task(data): - pass - - assert any(e.attribute == f"replication_create.{error}" for e in ve.value.errors) - else: - with replication_task(data) as replication: - restored = call("replication.restore", replication["id"], { - "name": f"restore {name}", - "target_dataset": "data/restore", - }) - call("replication.delete", restored["id"]) - - -@pytest.mark.parametrize("data,path,include", [ - ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/", True), - ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child", True), - ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child/work", False), - ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data", True), - ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child", True), - ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child/work", False), -]) -def test_query_attachment_delegate(ssh_credentials, data, path, include): - data = { - "name": "Test", - "transport": "SSH", - "source_datasets": ["source"], - "target_dataset": "target", - "recursive": False, - "name_regex": ".+", - "auto": False, - "retention_policy": "NONE", - **data, - } - if data["transport"] == "SSH": - data["ssh_credentials"] = ssh_credentials["credentials"]["id"] - - with replication_task(data) as t: - result = call("pool.dataset.query_attachment_delegate", "replication", path, True) - if include: - assert len(result) == 1 - assert result[0]["id"] == t["id"] - else: - assert len(result) == 0 - - -@pytest.mark.parametrize("exclude_mountpoint_property", [True, False]) -def test_run_onetime__exclude_mountpoint_property(exclude_mountpoint_property): - with dataset("src") as src: - with dataset("src/legacy") as src_legacy: - ssh(f"zfs set mountpoint=legacy {src_legacy}") - ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00") - - try: - call("replication.run_onetime", { - "direction": "PUSH", - "transport": "LOCAL", - "source_datasets": [src], - "target_dataset": f"{pool}/dst", - "recursive": True, - "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"], - "retention_policy": "NONE", - "replicate": True, - "readonly": "IGNORE", - "exclude_mountpoint_property": exclude_mountpoint_property - }, job=True) - - mountpoint = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip() - if exclude_mountpoint_property: - assert mountpoint == f"/mnt/{pool}/dst/legacy" - else: - assert mountpoint == "legacy" - finally: - ssh(f"zfs destroy -r {pool}/dst", check=False) diff --git a/tests/api2/test_replication_role.py b/tests/api2/test_replication_role.py deleted file mode 100644 index 8543b5baca0ca..0000000000000 --- a/tests/api2/test_replication_role.py +++ /dev/null @@ -1,108 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.replication import replication_task - - -@pytest.mark.parametrize("has_pull", [False, True]) -def test_create_pull_replication(has_pull): - with dataset("src") as src: - with dataset("dst") as dst: - payload = { - "name": "Test", - "direction": "PULL", - "transport": "LOCAL", - "source_datasets": [src], - "target_dataset": dst, - "recursive": True, - "naming_schema": ["%Y-%m-%d-%H-%M-%S"], - "retention_policy": "NONE", - "auto": False, - } - - if has_pull: - role = 
"REPLICATION_TASK_WRITE_PULL" - else: - role = "REPLICATION_TASK_WRITE" - with unprivileged_user_client([role]) as c: - if has_pull: - task = c.call("replication.create", payload) - c.call("replication.delete", task["id"]) - else: - with pytest.raises(ValidationErrors) as ve: - c.call("replication.create", payload) - - assert ve.value.errors[0].attribute == "replication_create.direction" - - -@pytest.mark.parametrize("has_pull", [False, True]) -def test_update_pull_replication(has_pull): - with dataset("src") as src: - with dataset("dst") as dst: - with replication_task({ - "name": "Test", - "direction": "PUSH", - "transport": "LOCAL", - "source_datasets": [src], - "target_dataset": dst, - "recursive": True, - "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"], - "retention_policy": "NONE", - "auto": False, - }) as t: - payload = { - "direction": "PULL", - "naming_schema": ["%Y-%m-%d-%H-%M-%S"], - "also_include_naming_schema": [], - } - - if has_pull: - role = "REPLICATION_TASK_WRITE_PULL" - else: - role = "REPLICATION_TASK_WRITE" - with unprivileged_user_client([role]) as c: - if has_pull: - c.call("replication.update", t["id"], payload) - else: - with pytest.raises(ValidationErrors) as ve: - c.call("replication.update", t["id"], payload) - - assert ve.value.errors[0].attribute == "replication_update.direction" - - -@pytest.mark.parametrize("has_pull", [False, True]) -def test_restore_push_replication(has_pull): - with dataset("src") as src: - with dataset("dst") as dst: - with replication_task({ - "name": "Test", - "direction": "PUSH", - "transport": "LOCAL", - "source_datasets": [src], - "target_dataset": dst, - "recursive": True, - "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"], - "retention_policy": "NONE", - "auto": False, - }) as t: - with dataset("dst2") as dst2: - payload = { - "name": "Test restore", - "target_dataset": dst2, - } - - if has_pull: - role = "REPLICATION_TASK_WRITE_PULL" - else: - role = "REPLICATION_TASK_WRITE" - with unprivileged_user_client([role]) as c: - if has_pull: - rt = c.call("replication.restore", t["id"], payload) - c.call("replication.delete", rt["id"]) - else: - with pytest.raises(ValidationErrors) as ve: - c.call("replication.restore", t["id"], payload) - - assert ve.value.errors[0].attribute == "replication_create.direction" diff --git a/tests/api2/test_replication_sudo.py b/tests/api2/test_replication_sudo.py deleted file mode 100644 index d14a1147b0d99..0000000000000 --- a/tests/api2/test_replication_sudo.py +++ /dev/null @@ -1,54 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, password, ssh - - -@pytest.mark.parametrize("task", [ - {"direction": "PUSH", "also_include_naming_schema": ["auto-%Y-%m-%d-%H-%M"]}, - {"direction": "PULL", "naming_schema": ["auto-%Y-%m-%d-%H-%M"]}, -]) -def test_replication_sudo(task): - with dataset("admin") as admin_homedir: - with user({ - "username": "admin", - "full_name": "admin", - "group_create": True, - "home": f"/mnt/{admin_homedir}", - "password": "test1234", - }): - ssh_connection = call("keychaincredential.setup_ssh_connection", { - "private_key": { - "generate_key": True, - "name": "test key", - }, - "connection_name": "test", - "setup_type": "SEMI-AUTOMATIC", - "semi_automatic_setup": { - "url": "http://localhost", - "password": password(), - "username": "admin", - "sudo": True, - }, - }) - try: - with dataset("src") as src: - ssh(f"touch 
/mnt/{src}/test") - call("zfs.snapshot.create", {"dataset": src, "name": "auto-2023-01-18-16-00"}) - with dataset("dst") as dst: - call("replication.run_onetime", { - **task, - "transport": "SSH", - "ssh_credentials": ssh_connection["id"], - "sudo": True, - "source_datasets": [src], - "target_dataset": dst, - "recursive": False, - "retention_policy": "NONE", - }, job=True) - - assert ssh(f"ls /mnt/{dst}") == "test\n" - finally: - call("keychaincredential.delete", ssh_connection["id"]) - call("keychaincredential.delete", ssh_connection["attributes"]["private_key"]) diff --git a/tests/api2/test_replication_utils.py b/tests/api2/test_replication_utils.py deleted file mode 100644 index 34c2c6c852423..0000000000000 --- a/tests/api2/test_replication_utils.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import call, pool - - -@pytest.fixture(scope="module") -def localhost_ssh_connection(): - credential = call("keychaincredential.create", { - "name": "key", - "type": "SSH_KEY_PAIR", - "attributes": call("keychaincredential.generate_ssh_key_pair"), - }) - try: - token = call("auth.generate_token") - connection = call("keychaincredential.remote_ssh_semiautomatic_setup", { - "name": "localhost", - "url": "http://localhost", - "token": token, - "private_key": credential["id"], - }) - try: - yield connection["id"] - finally: - call("keychaincredential.delete", connection["id"]) - finally: - call("keychaincredential.delete", credential["id"]) - - -@pytest.mark.parametrize("transport", ["SSH", "SSH+NETCAT"]) -def test_list_datasets_ssh(localhost_ssh_connection, transport): - assert pool in call("replication.list_datasets", transport, localhost_ssh_connection) diff --git a/tests/api2/test_reporting_netdataweb.py b/tests/api2/test_reporting_netdataweb.py deleted file mode 100644 index b6cf98f7c6c6b..0000000000000 --- a/tests/api2/test_reporting_netdataweb.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest -import requests -from requests.auth import HTTPBasicAuth - -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.utils import call, url - - -def test_netdata_web_login_succeed(): - password = call('reporting.netdataweb_generate_password') - r = requests.get(f'{url()}/netdata/', auth=HTTPBasicAuth('root', password)) - assert r.status_code == 200 - - -def test_netdata_web_login_fail(): - r = requests.get(f'{url()}/netdata/') - assert r.status_code == 401 - - -@pytest.mark.parametrize("role,expected", [ - (["FULL_ADMIN"], True), - (["READONLY_ADMIN"], True), -]) -def test_netdata_web_login_unprivileged_succeed(role, expected): - with unprivileged_user_client(roles=role) as c: - me = c.call('auth.me') - password = c.call('reporting.netdataweb_generate_password') - r = requests.get(f'{url()}/netdata/', auth=HTTPBasicAuth(me['pw_name'], password)) - assert (r.status_code == 200) is expected diff --git a/tests/api2/test_reporting_realtime.py b/tests/api2/test_reporting_realtime.py deleted file mode 100644 index 59ebd7fefb090..0000000000000 --- a/tests/api2/test_reporting_realtime.py +++ /dev/null @@ -1,19 +0,0 @@ -import time - -from middlewared.test.integration.assets.account import unprivileged_user_client - - -def test_reporting_realtime(): - with unprivileged_user_client(["REPORTING_READ"]) as c: - events = [] - - def callback(type, **message): - events.append((type, message)) - - c.subscribe("reporting.realtime", callback, sync=True) - - time.sleep(5) - - assert events - - assert not 
events[0][1]["fields"]["failed_to_connect"] diff --git a/tests/api2/test_rest_api.py b/tests/api2/test_rest_api.py deleted file mode 100644 index 4d4669f9c36ce..0000000000000 --- a/tests/api2/test_rest_api.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding=utf-8 -*- -import urllib.parse - -from middlewared.test.integration.utils import call - -import os -import sys -sys.path.append(os.getcwd()) -from functions import GET - - -def test_non_numeric_identifiers(): - disk = call('disk.query')[0] - results = GET(f'/disk/id/{urllib.parse.quote(disk["identifier"])}/') - assert results.status_code == 200, results.text diff --git a/tests/api2/test_rest_api_authentication.py b/tests/api2/test_rest_api_authentication.py deleted file mode 100644 index a245b9d5f2619..0000000000000 --- a/tests/api2/test_rest_api_authentication.py +++ /dev/null @@ -1,152 +0,0 @@ -# -*- coding=utf-8 -*- -import contextlib -import io -import json - -import pytest -import requests - -from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template -from middlewared.test.integration.assets.api_key import api_key -from middlewared.test.integration.utils import client -from middlewared.test.integration.utils.client import truenas_server - -import os -import sys -sys.path.append(os.getcwd()) -from functions import POST, GET, DELETE, SSH_TEST - - -@contextlib.contextmanager -def api_key_auth(allowlist): - with api_key(allowlist) as key: - yield dict(anonymous=True, headers={"Authorization": f"Bearer {key}"}) - - -@contextlib.contextmanager -def login_password_auth(allowlist): - with unprivileged_user_template( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=allowlist, - web_shell=False, - ) as t: - yield dict(auth=(t.username, t.password)) - - -@contextlib.contextmanager -def token_auth(allowlist): - with unprivileged_user_template( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=allowlist, - web_shell=False, - ) as t: - with client(auth=(t.username, t.password)) as c: - token = c.call("auth.generate_token", 300, {}, True) - yield dict(anonymous=True, headers={"Authorization": f"Token {token}"}) - - -@pytest.fixture(params=[api_key_auth, login_password_auth, token_auth]) -def auth(request): - return request.param - - -def test_root_api_key_rest(auth): - """We should be able to call a method with a root credential using REST API.""" - with auth([{"method": "*", "resource": "*"}]) as kwargs: - results = GET('/system/info/', **kwargs) - assert results.status_code == 200, results.text - - -def test_allowed_api_key_rest_plain(auth): - """We should be able to request an endpoint with a credential that allows that request using REST API.""" - with auth([{"method": "GET", "resource": "/system/info/"}]) as kwargs: - results = GET('/system/info/', **kwargs) - assert results.status_code == 200, results.text - - -def test_allowed_api_key_rest_dynamic(auth): - """We should be able to request a dynamic endpoint with a credential that allows that request using REST API.""" - with auth([{"method": "GET", "resource": "/user/id/{id_}/"}]) as kwargs: - results = GET('/user/id/1/', **kwargs) - assert results.status_code == 200, results.text - - -def test_denied_api_key_rest(auth): - """ - We should not be able to request an endpoint with a credential that does not allow that request using REST API. 
- """ - with auth([{"method": "GET", "resource": "/system/info_/"}]) as kwargs: - results = GET('/system/info/', **kwargs) - assert results.status_code == 403 - - -def test_root_api_key_upload(auth): - """We should be able to call a method with a root credential using file upload endpoint.""" - ip = truenas_server.ip - with auth([{"method": "*", "resource": "*"}]) as kwargs: - kwargs.pop("anonymous", None) # This key is only used for our test requests library - r = requests.post( - f"http://{ip}/_upload", - **kwargs, - data={ - "data": json.dumps({ - "method": "filesystem.put", - "params": ["/tmp/upload"], - }) - }, - files={ - "file": io.BytesIO(b"test"), - }, - timeout=10 - ) - r.raise_for_status() - - -def test_allowed_api_key_upload(auth): - """We should be able to call a method with a credential that allows that call using file upload endpoint.""" - ip = truenas_server.ip - with auth([{"method": "CALL", "resource": "filesystem.put"}]) as kwargs: - kwargs.pop("anonymous", None) # This key is only used for our test requests library - r = requests.post( - f"http://{ip}/_upload", - **kwargs, - data={ - "data": json.dumps({ - "method": "filesystem.put", - "params": ["/tmp/upload"], - }) - }, - files={ - "file": io.BytesIO(b"test"), - }, - timeout=10 - ) - r.raise_for_status() - - -def test_denied_api_key_upload(auth): - """ - We should not be able to call a method with a credential that does not allow that call using file upload endpoint. - """ - ip = truenas_server.ip - with auth([{"method": "CALL", "resource": "filesystem.put_"}]) as kwargs: - kwargs.pop("anonymous", None) # This key is only used for our test requests library - r = requests.post( - f"http://{ip}/_upload", - **kwargs, - data={ - "data": json.dumps({ - "method": "filesystem.put", - "params": ["/tmp/upload"], - }) - }, - files={ - "file": io.BytesIO(b"test"), - }, - timeout=10 - ) - assert r.status_code == 403 diff --git a/tests/api2/test_rest_api_download.py b/tests/api2/test_rest_api_download.py deleted file mode 100644 index 92b22fc77301d..0000000000000 --- a/tests/api2/test_rest_api_download.py +++ /dev/null @@ -1,106 +0,0 @@ -import errno -import time - -import pytest -import requests - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.account import unprivileged_user -from middlewared.test.integration.utils import client, session, url - - -@pytest.mark.parametrize("method", ["test_download_pipe", "test_download_unchecked_pipe"]) -def test_download(method): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/{method}", - headers={"Content-type": "application/json"}, - data="{\"key\": \"value\"}", - ) - r.raise_for_status() - assert r.headers["Content-Type"] == "application/octet-stream" - assert r.text == '{"key": "value"}' - - -def test_no_download_from_checked_pipe(): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/test_download_pipe?download=0", - headers={"Content-type": "application/json"}, - data="{\"key\": \"value\"}", - ) - - assert r.status_code == 400 - assert r.json()["message"] == "JSON response is not supported for this method." 
- - -def test_no_download_from_unchecked_pipe(): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/test_download_unchecked_pipe?download=0", - headers={"Content-type": "application/json"}, - data="{\"key\": \"value\"}", - ) - r.raise_for_status() - - assert r.headers["Content-Type"].startswith("application/json") - assert r.json() == {"wrapped": {"key": "value"}} - - -def test_download_from_download_endpoint(): - with client() as c: - job_id, path = c.call("core.download", "resttest.test_download_pipe", [{"key": "value"}], "file.bin") - - r = requests.get(f"{url()}{path}") - r.raise_for_status() - - assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\"" - assert r.headers["Content-Type"] == "application/octet-stream" - assert r.text == '{"key": "value"}' - - -@pytest.mark.parametrize("buffered,sleep,result", [ - (True, 0, ""), - (True, 4, '{"key": "value"}'), - (False, 0, '{"key": "value"}'), -]) -def test_buffered_download_from_slow_download_endpoint(buffered, sleep, result): - with client() as c: - job_id, path = c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin", - buffered) - - time.sleep(sleep) - - r = requests.get(f"{url()}{path}") - r.raise_for_status() - - assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\"" - assert r.headers["Content-Type"] == "application/octet-stream" - assert r.text == result - - -def test_download_authorization_ok(): - with unprivileged_user( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=[{"method": "CALL", "resource": "resttest.test_download_slow_pipe"}], - web_shell=False, - ) as user: - with client(auth=(user.username, user.password)) as c: - c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin") - - -def test_download_authorization_fails(): - with unprivileged_user( - username="unprivileged", - group_name="unprivileged_users", - privilege_name="Unprivileged users", - allowlist=[], - web_shell=False, - ) as user: - with client(auth=(user.username, user.password)) as c: - with pytest.raises(CallError) as ve: - c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin") - - assert ve.value.errno == errno.EACCES diff --git a/tests/api2/test_rest_api_upload.py b/tests/api2/test_rest_api_upload.py deleted file mode 100644 index 1c37a0016fef3..0000000000000 --- a/tests/api2/test_rest_api_upload.py +++ /dev/null @@ -1,68 +0,0 @@ -import io -import json - -import pytest - -from middlewared.test.integration.utils import client, session, url - - -@pytest.mark.parametrize("method", ["test_input_pipe", "test_input_unchecked_pipe"]) -def test_upload(method): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/{method}", - files={ - "data": (None, io.StringIO('{"key": "value"}')), - "file": (None, io.StringIO("FILE")), - }, - ) - r.raise_for_status() - job_id = r.json() - - with client() as c: - assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE' - - -def test_no_upload_to_checked_pipe(): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/test_input_pipe", - headers={"Content-type": "application/json"}, - data="{\"key\": \"value\"}", - ) - - assert r.status_code == 400 - assert r.json()["message"] == "This method accepts only multipart requests." 
- - -def test_no_upload_to_unchecked_pipe(): - with session() as s: - r = s.post( - f"{url()}/api/v2.0/resttest/test_input_unchecked_pipe", - headers={"Content-type": "application/json"}, - data='{"key": "value"}', - ) - r.raise_for_status() - job_id = r.json() - - with client() as c: - assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}NONE' - - -def test_upload_to_upload_endpoint(): - with session() as s: - r = s.post( - f"{url()}/_upload", - files={ - "data": (None, io.StringIO(json.dumps({ - "method": "resttest.test_input_pipe", - "params": [{"key": "value"}] - }))), - "file": (None, io.StringIO("FILE")), - }, - ) - r.raise_for_status() - job_id = r.json()["job_id"] - - with client() as c: - assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE' diff --git a/tests/api2/test_rsync_ssh_authentication.py b/tests/api2/test_rsync_ssh_authentication.py deleted file mode 100644 index eb6f4a0a8dc80..0000000000000 --- a/tests/api2/test_rsync_ssh_authentication.py +++ /dev/null @@ -1,302 +0,0 @@ -import base64 -import contextlib -import errno -from unittest.mock import ANY - -import pytest - -from middlewared.service_exception import ValidationErrors, ValidationError -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.keychain import localhost_ssh_credentials -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.unittest import RegexString - - -@contextlib.contextmanager -def task(data): - data = { - **data - } - - task = call("rsynctask.create", data) - - try: - yield task - finally: - call("rsynctask.delete", task["id"]) - - -def run_task(task, timeout=120): - call("rsynctask.run", task["id"], job=True, timeout=timeout) - - -@pytest.fixture(scope="module") -def localuser(): - with dataset("localuser_homedir") as localuser_homedir: - with user({ - "username": "localuser", - "full_name": "Local User", - "group_create": True, - "home": f"/mnt/{localuser_homedir}", - "password": "test1234", - }) as u: - yield u - - -@pytest.fixture(scope="module") -def remoteuser(): - with dataset("remoteuser_homedir") as remoteuser_homedir: - with user({ - "username": "remoteuser", - "full_name": "Remote User", - "group_create": True, - "home": f"/mnt/{remoteuser_homedir}", - "password": "test1234", - }) as u: - yield u - - -@pytest.fixture(scope="module") -def src(localuser): - with dataset("src") as src: - path = f"/mnt/{src}" - yield path - - -@pytest.fixture(scope="module") -def dst(remoteuser): - with dataset("dst") as dst: - path = f"/mnt/{dst}" - ssh(f"chown -R remoteuser:remoteuser {path}") - yield path - - -@pytest.fixture(scope="module") -def ssh_credentials(remoteuser): - with localhost_ssh_credentials(username="remoteuser") as c: - yield c - - -@pytest.fixture(scope="function") -def cleanup(localuser, src, dst): - ssh(f"rm -rf {localuser['home']}/.ssh") - ssh(f"rm -rf {src}/*", check=False) - ssh(f"touch {src}/test") - ssh(f"chown -R localuser:localuser {src}") - ssh(f"rm -rf {dst}/*", check=False) - - -def test_no_credential_provided_create(cleanup, localuser, remoteuser, src, dst): - with pytest.raises(ValidationErrors) as e: - with task({ - "path": f"{src}/", - "user": "localuser", - "remotehost": "remoteuser@localhost", - "remoteport": 22, - "mode": "SSH", - "remotepath": dst, - }): - pass - - assert e.value.errors == [ - ValidationError( - "rsync_task_create.user", - RegexString(".*you need a user with 
a private key.*"), - errno.EINVAL, - ) - ] - - -def test_home_directory_key_invalid_permissions(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - ssh(f"mkdir {localuser['home']}/.ssh") - call( - "filesystem.file_receive", - f"{localuser['home']}/.ssh/id_rsa", - base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"), - {"mode": 0o0644}, - ) - ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh") - - with pytest.raises(ValidationErrors) as e: - with task({ - "path": f"{src}/", - "user": "localuser", - "remotehost": "remoteuser@localhost", - "remoteport": 22, - "mode": "SSH", - "remotepath": dst, - }): - pass - - assert e.value.errors == [ - ValidationError( - "rsync_task_create.user", - RegexString("Permissions 644 .* are too open.*"), - errno.EINVAL, - ) - ] - - -@pytest.mark.parametrize("validate_rpath", [True, False]) -def test_home_directory_key_not_in_known_hosts(cleanup, localuser, remoteuser, src, dst, ssh_credentials, - validate_rpath): - ssh(f"mkdir {localuser['home']}/.ssh") - call( - "filesystem.file_receive", - f"{localuser['home']}/.ssh/id_rsa", - base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"), - {"mode": 0o600}, - ) - ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh") - - with pytest.raises(ValidationErrors) as e: - with task({ - "path": f"{src}/", - "user": "localuser", - "remotehost": "remoteuser@localhost", - "remoteport": 22, - "mode": "SSH", - "remotepath": dst, - "validate_rpath": validate_rpath, - }): - pass - - assert e.value.errors == [ - ValidationError( - "rsync_task_create.remotehost", - ANY, - ValidationError.ESSLCERTVERIFICATIONERROR, - ) - ] - - -def test_ssh_keyscan_does_not_duplicate_host_keys(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - ssh(f"mkdir {localuser['home']}/.ssh") - ssh(f"ssh-keyscan localhost >> {localuser['home']}/.ssh/known_hosts") - call( - "filesystem.file_receive", - f"{localuser['home']}/.ssh/id_rsa", - base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"), - {"mode": 0o600}, - ) - ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh") - - known_hosts = ssh(f"cat {localuser['home']}/.ssh/known_hosts") - - with task({ - "path": f"{src}/", - "user": "localuser", - "remotehost": "remoteuser@localhost", - "remoteport": 22, - "mode": "SSH", - "remotepath": dst, - "ssh_keyscan": True, - }) as t: - pass - - assert ssh(f"cat {localuser['home']}/.ssh/known_hosts") == known_hosts - - -def test_home_directory_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - ssh(f"mkdir {localuser['home']}/.ssh") - call( - "filesystem.file_receive", - f"{localuser['home']}/.ssh/id_rsa", - base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"), - {"mode": 0o600}, - ) - ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh") - - with task({ - "path": f"{src}/", - "user": "localuser", - "remotehost": "remoteuser@localhost", - "remoteport": 22, - "mode": "SSH", - "remotepath": dst, - "ssh_keyscan": True, - }) as t: - run_task(t) - - assert ssh(f"ls -1 {dst}") == "test\n" - - -def test_ssh_credentials_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - with task({ - "path": f"{src}/", - "user": "localuser", - "ssh_credentials": ssh_credentials["credentials"]["id"], - "mode": "SSH", - "remotepath": dst, - }) as t: - run_task(t) - - assert ssh(f"ls -1 {dst}") == "test\n" - - 
-def test_ssh_credentials_delete(cleanup, localuser, remoteuser, src, dst): - with localhost_ssh_credentials(username="remoteuser") as c: - path = f"{src}/" - with task({ - "path": path, - "user": "localuser", - "ssh_credentials": c["credentials"]["id"], - "mode": "SSH", - "remotepath": dst, - }) as t: - assert call("keychaincredential.used_by", c["credentials"]["id"]) == [ - {"title": f"Rsync task for {path!r}", "unbind_method": "disable"}, - ] - - call("keychaincredential.delete", c["credentials"]["id"], {"cascade": True}) - - t = call("rsynctask.get_instance", t["id"]) - assert not t["enabled"] - - -def test_state_persist(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - with task({ - "path": f"{src}/", - "user": "localuser", - "ssh_credentials": ssh_credentials["credentials"]["id"], - "mode": "SSH", - "remotepath": dst, - }) as t: - run_task(t) - - row = call("datastore.query", "tasks.rsync", [["id", "=", t["id"]]], {"get": True}) - assert row["rsync_job"]["state"] == "SUCCESS" - - -def test_local_path_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - src = f"{src}/work stuff" - ssh(f"mkdir '{src}'") - ssh(f"touch '{src}/test2'") - ssh(f"chown -R localuser:localuser '{src}'") - with task({ - "path": f"{src}/", - "user": "localuser", - "ssh_credentials": ssh_credentials["credentials"]["id"], - "mode": "SSH", - "remotepath": dst, - }) as t: - run_task(t) - - assert ssh(f"ls -1 '{dst}'") == "test2\n" - - -def test_remotepath_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials): - dst = f"{dst}/work stuff" - ssh(f"mkdir '{dst}'") - ssh(f"chown remoteuser:remoteuser '{dst}'") - with task({ - "path": f"{src}/", - "user": "localuser", - "ssh_credentials": ssh_credentials["credentials"]["id"], - "mode": "SSH", - "remotepath": dst, - }) as t: - run_task(t) - - assert ssh(f"ls -1 '{dst}'") == "test\n" diff --git a/tests/api2/test_run_as_user_impl.py b/tests/api2/test_run_as_user_impl.py deleted file mode 100644 index 71f966af82e13..0000000000000 --- a/tests/api2/test_run_as_user_impl.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys -import os -from contextlib import contextmanager - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import wait_on_job -from middlewared.test.integration.utils import call, ssh - - -@contextmanager -def create_cron_job(owner, ownerGroup, user): - test_folder = ssh('mktemp -d').strip() - ssh(f'chown -R {owner}:{ownerGroup} {test_folder}') - cron = call( - 'cronjob.create', { - 'command': f'touch {test_folder}/test.txt', 'user': user, 'stderr': False, 'stdout': False} - ) - try: - yield cron - finally: - ssh(f'rm -rf {test_folder}') - - -@contextmanager -def run_cron_job(cron_id): - job_id = call('cronjob.run', cron_id) - try: - yield wait_on_job(job_id, 300) - finally: - call('cronjob.delete', cron_id) - - -def test_01_running_as_valid_user(): - with create_cron_job(owner='apps', ownerGroup='apps', user='apps') as cron_job: - with run_cron_job(cron_job['id']) as job_detail: - assert job_detail['results']['error'] is None - - -def test_02_running_as_invalid_user(): - with create_cron_job(owner='root', ownerGroup='root', user='apps') as cron_job: - with run_cron_job(cron_job['id']) as job_detail: - assert f'"{cron_job["command"]}" exited with 1' in job_detail['results']['error'], job_detail diff --git a/tests/api2/test_schema_private.py b/tests/api2/test_schema_private.py deleted file mode 100644 index 0d65767b63637..0000000000000 --- a/tests/api2/test_schema_private.py +++ /dev/null @@ -1,39 
+0,0 @@ -import pytest - -from middlewared.test.integration.utils import call, client, mock, ssh - - - -def test_private_params_do_not_leak_to_logs(): - with mock("test.test1", """ - from middlewared.service import accepts - from middlewared.schema import Dict, Str - - @accepts(Dict("test", Str("password", private=True))) - async def mock(self, args): - raise Exception() - """): - log_before = ssh("cat /var/log/middlewared.log") - - with client(py_exceptions=False) as c: - with pytest.raises(Exception): - c.call("test.test1", {"password": "secret"}) - - log = ssh("cat /var/log/middlewared.log")[len(log_before):] - assert "Exception while calling test.test1(*[{'password': '********'}])" in log - - -def test_private_params_do_not_leak_to_core_get_jobs(): - with mock("test.test1", """ - from middlewared.service import accepts, job - from middlewared.schema import Dict, Str - - @accepts(Dict("test", Str("password", private=True))) - @job() - async def mock(self, job, args): - return 42 - """): - job_id = call("test.test1", {"password": "secret"}) - - job_descr = call("core.get_jobs", [["id", "=", job_id]], {"get": True}) - assert job_descr["arguments"] == [{"password": "********"}] diff --git a/tests/api2/test_serial_consoles.py b/tests/api2/test_serial_consoles.py deleted file mode 100644 index 122d29f1c9183..0000000000000 --- a/tests/api2/test_serial_consoles.py +++ /dev/null @@ -1,40 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import call, ssh - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - - -def test_enabling_serial_port(): - ports = call('system.advanced.serial_port_choices') - assert 'ttyS0' in ports, ports - - for port in ports: - test_config = {'serialconsole': True, 'serialport': port} - config = call('system.advanced.update', test_config) - for k, v in test_config.items(): - assert config[k] == v, config - assert_serial_port_configuration({p: p == port for p in ports}) - - -def test_disabling_serial_port(): - ports = call('system.advanced.serial_port_choices') - assert 'ttyS0' in ports, ports - - for port in ports: - test_config = {'serialconsole': False, 'serialport': port} - config = call('system.advanced.update', test_config) - for k, v in test_config.items(): - assert config[k] == v, config - assert_serial_port_configuration({p: False for p in ports}) - - -def assert_serial_port_configuration(ports): - for port, enabled in ports.items(): - is_enabled = ssh(f'systemctl is-enabled serial-getty@{port}.service', False).strip() == 'enabled' - assert is_enabled is enabled, f'{port!r} enabled assertion failed: {is_enabled!r} != {enabled!r}' - is_enabled = ssh(f'systemctl is-active --quiet serial-getty@{port}.service', False, True)['returncode'] == 0 - assert is_enabled is enabled, f'{port!r} active assertion failed: {is_enabled!r} != {enabled!r}' diff --git a/tests/api2/test_sharing_service_encrypted_dataset_info.py b/tests/api2/test_sharing_service_encrypted_dataset_info.py deleted file mode 100644 index 7a8108372edce..0000000000000 --- a/tests/api2/test_sharing_service_encrypted_dataset_info.py +++ /dev/null @@ -1,105 +0,0 @@ -import contextlib -import pytest - -from middlewared.test.integration.utils import call -from middlewared.test.integration.assets.pool import dataset - - -PASSPHRASE = 'testing123' -ENCRYPTION_PARAMETERS = { - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-256-GCM', - 'pbkdf2iters': 350000, - 'passphrase': PASSPHRASE, - }, - 'inherit_encryption': False, -} - - -@contextlib.contextmanager 
-def lock_dataset(dataset_name): - try: - yield call('pool.dataset.lock', dataset_name, {'force_umount': True}, job=True) - finally: - call( - 'pool.dataset.unlock', dataset_name, { - 'datasets': [{'passphrase': PASSPHRASE, 'name': dataset_name}] - }, - job=True, - ) - - -@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [ - ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'), - ('sharing.nfs', {}, {}, 'path'), - ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'), -]) -def test_service_encrypted_dataset_default_info(namespace, dataset_creation_params, share_creation_params, path_field): - with dataset('test_sharing_locked_ds_info', data={ - **ENCRYPTION_PARAMETERS, - **dataset_creation_params, - }) as ds: - path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}' - share_creation_params[path_field] = path - share = call(f'{namespace}.create', share_creation_params) - assert share['locked'] is False - - with lock_dataset(ds): - assert call(f'{namespace}.get_instance', share['id'])['locked'] is True - - assert call(f'{namespace}.get_instance', share['id'])['locked'] is False - - -@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field,selected_fields', [ - ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path', [['path', 'name'], ['path', 'name', 'locked']]), - ('sharing.nfs', {}, {}, 'path', [['path', 'hosts'], ['path', 'hosts', 'locked']]), - ( - 'iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, - {'name': 'test-extend'}, 'disk', - [['name', 'type'], ['name', 'type', 'locked']] - ), -]) -def test_service_encrypted_dataset_selected_info( - namespace, dataset_creation_params, share_creation_params, path_field, selected_fields, -): - with dataset('test_sharing_locked_ds_info', data={ - **ENCRYPTION_PARAMETERS, - **dataset_creation_params, - }) as ds: - path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}' - share_creation_params[path_field] = path - assert call(f'{namespace}.create', share_creation_params)['locked'] is False - - with lock_dataset(ds): - for selected_field_entry in selected_fields: - for share in call(f'{namespace}.query', [], {'select': selected_field_entry}): - assert set(share) == set(selected_field_entry) - - -@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [ - ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'), - ('sharing.nfs', {}, {}, 'path'), - ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'), -]) -def test_service_encrypted_dataset_retrieve_info_with_cache( - namespace, dataset_creation_params, share_creation_params, path_field -): - with dataset('test_sharing_locked_ds_info', data={ - **ENCRYPTION_PARAMETERS, - **dataset_creation_params, - }) as ds: - path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}' - share = call(f'{namespace}.create', {**share_creation_params, path_field: path}) - assert share['locked'] is False - with lock_dataset(ds): - assert call( - f'{namespace}.get_instance', share['id'], {'extra': {'retrieve_locked_info': False}} - ).get('locked') is None - cached_locked_value = call( - f'{namespace}.get_instance', share['id'], {'extra': {'use_cached_locked_datasets': True}} - ) - locked_value = call( - f'{namespace}.get_instance', share['id'], {'extra': 
{'use_cached_locked_datasets': False}} - ) - assert cached_locked_value == locked_value diff --git a/tests/api2/test_simple_share.py b/tests/api2/test_simple_share.py deleted file mode 100644 index 20f78fd3810a0..0000000000000 --- a/tests/api2/test_simple_share.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding=utf-8 -*- -import pytest -import secrets -import string - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.utils import call - - -PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)) - - -def test__smb_simple_share_validation(): - existing_smb_users = [x['username'] for x in call('user.query', [['smb', '=', True]])] - assert len(existing_smb_users) == 0, str(existing_smb_users) - - with pytest.raises(ValidationErrors): - call('sharing.smb.share_precheck') - - with user({ - "username": "simple_share_user", - "full_name": "simple_share_user", - "group_create": True, - "password": PASSWD, - "smb": True, - }): - # First check that basic call of this endpoint succeeds - call('sharing.smb.share_precheck') - - # Verify works with basic share name - call('sharing.smb.share_precheck', {'name': 'test_share'}) - - # Verify raises error if share name invalid - with pytest.raises(ValidationErrors): - call('sharing.smb.share_precheck', {'name': 'test_share*'}) - - # Another variant of invalid name - with pytest.raises(ValidationErrors): - call('sharing.smb.share_precheck', {'name': 'gLobaL'}) - - with dataset('test_smb') as ds: - with smb_share(f'/mnt/{ds}', 'test_share'): - with pytest.raises(ValidationErrors): - call('sharing.smb.share_precheck', {'name': 'test_share'}) diff --git a/tests/api2/test_smart_test_crud.py b/tests/api2/test_smart_test_crud.py deleted file mode 100644 index e17b83da4bd5d..0000000000000 --- a/tests/api2/test_smart_test_crud.py +++ /dev/null @@ -1,100 +0,0 @@ -import contextlib -import re - -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.utils import call - - -@contextlib.contextmanager -def smart_test(data): - test = call("smart.test.create", data) - try: - yield test - finally: - call("smart.test.delete", test["id"]) - - -def smart_test_disks(all_disks=False, disk_index=0): - if all_disks: - return {"all_disks": True} - else: - return {"disks": [sorted(call("smart.test.disk_choices").keys())[disk_index]]} - - -@pytest.mark.parametrize("existing_all_disks", [False, True]) -@pytest.mark.parametrize("new_all_disks", [False, True]) -def test_smart_test_already_has_tests_for_this_type(existing_all_disks, new_all_disks): - if existing_all_disks: - error = "There already is an all-disks SHORT test" - else: - error = "The following disks already have SHORT test: sd[a-z]" - - with smart_test({ - "schedule": { - "hour": "0", - "dom": "*", - "month": "*", - "dow": "*", - }, - **smart_test_disks(existing_all_disks), - "type": "SHORT", - }): - with pytest.raises(ValidationErrors) as ve: - with smart_test({ - "schedule": { - "hour": "1", - "dom": "*", - "month": "*", - "dow": "*", - }, - **smart_test_disks(new_all_disks), - "type": "SHORT", - }): - pass - - assert re.fullmatch(error, ve.value.errors[0].errmsg) - - -@pytest.mark.parametrize("existing_all_disks", [False, True]) -@pytest.mark.parametrize("new_all_disks", [False, True]) -def 
test_smart_test_intersect(existing_all_disks, new_all_disks): - with smart_test({ - "schedule": { - "hour": "3", - "dom": "1", - "month": "*", - "dow": "*", - }, - **smart_test_disks(existing_all_disks), - "type": "LONG", - }): - with pytest.raises(ValidationErrors) as ve: - with smart_test({ - "schedule": { - "hour": "3", - "dom": "*", - "month": "*", - "dow": "1", - }, - **smart_test_disks(existing_all_disks), - "type": "SHORT", - }): - pass - - assert ve.value.errors[0].errmsg == "A LONG test already runs at Day 1st of every month, Mon, 03:00" - - -def test_smart_test_update(): - with smart_test({ - "schedule": { - "hour": "0", - "dom": "*", - "month": "*", - "dow": "*", - }, - **smart_test_disks(True), - "type": "SHORT", - }) as test: - call("smart.test.update", test["id"], {}) diff --git a/tests/api2/test_smart_test_run.py b/tests/api2/test_smart_test_run.py deleted file mode 100644 index e753830fb6629..0000000000000 --- a/tests/api2/test_smart_test_run.py +++ /dev/null @@ -1,64 +0,0 @@ -import contextlib -import re -import time - -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.utils import call, client, mock - - -@pytest.fixture(scope="function") -def short_test(): - disk = call("disk.query")[0] - with mock("smart.test.disk_choices", return_value={disk["identifier"]: disk}): - with mock("disk.smartctl", return_value="Self Test has begun"): - with mock("smart.test.results", """\ - i = 0 - async def mock(self, *args): - global i - if i > 100: - return {"current_test": None} - else: - result = {"current_test": {"progress": i}} - i += 30 - return result - """): - result = call("smart.test.manual_test", [{"identifier": disk["identifier"], "type": "SHORT"}]) - yield result[0] - - -def test_smart_test_job_progress(short_test): - progresses = set() - for i in range(30): - job = call("core.get_jobs", [["id", "=", short_test["job"]]], {"get": True}) - if job["state"] == "RUNNING": - progresses.add(job["progress"]["percent"]) - time.sleep(5) - elif job["state"] == "SUCCESS": - break - else: - assert False, job - else: - assert False - - assert progresses == {0, 30, 60, 90} - - -def test_smart_test_event_source(short_test): - progresses = set() - - def callback(event_type, **kwargs): - progresses.add(kwargs['fields']['progress']) - - with client() as c: - c.subscribe(f"smart.test.progress:{short_test['disk']}", callback, sync=True) - - for i in range(30): - if None in progresses: - assert progresses - {0} == {30, 60, 90, None} - break - else: - time.sleep(5) - else: - assert False diff --git a/tests/api2/test_smb_client.py b/tests/api2/test_smb_client.py deleted file mode 100644 index c9b98eeef39ef..0000000000000 --- a/tests/api2/test_smb_client.py +++ /dev/null @@ -1,243 +0,0 @@ -import os -import pytest - -from middlewared.test.integration.assets.account import user, group -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.smb import ( - del_stream, get_stream, list_stream, set_stream, set_xattr_compat, - smb_share, smb_mount -) -from middlewared.test.integration.utils import call, client, ssh - - -PERMSET = { - "READ_DATA": False, - "WRITE_DATA": False, - "APPEND_DATA": False, - "READ_NAMED_ATTRS": False, - "WRITE_NAMED_ATTRS": False, - "EXECUTE": False, - "DELETE_CHILD": False, - "READ_ATTRIBUTES": False, - "WRITE_ATTRIBUTES": False, - "DELETE": False, - "READ_ACL": False, - "WRITE_ACL": False, - "WRITE_OWNER": False, - "SYNCHRONIZE": True -} - -SAMPLE_ENTRY = { - "tag": 
"GROUP", - "id": 666, - "type": "ALLOW", - "perms": PERMSET, - "flags": {"BASIC": "INHERIT"} -} - -PERSISTENT_ACL = [ - { - "tag": "GROUP", - "id": 545, - "type": "ALLOW", - "perms": {"BASIC": "FULL_CONTROL"}, - "flags": {"BASIC": "INHERIT"} - } -] - -TMP_SMB_USER_PASSWORD = 'Abcd1234$' - - -@pytest.fixture(scope='module') -def setup_smb_tests(request): - with dataset('smbclient-testing', data={'share_type': 'SMB'}) as ds: - with user({ - 'username': 'smbuser', - 'full_name': 'smbuser', - 'group_create': True, - 'password': TMP_SMB_USER_PASSWORD - }) as u: - with smb_share(os.path.join('/mnt', ds), 'client_share') as s: - try: - call('service.start', 'cifs') - yield {'dataset': ds, 'share': s, 'user': u} - finally: - call('service.stop', 'cifs') - - -@pytest.fixture(scope='module') -def mount_share(setup_smb_tests): - with smb_mount(setup_smb_tests['share']['name'], 'smbuser', TMP_SMB_USER_PASSWORD) as mp: - yield setup_smb_tests | {'mountpoint': mp} - - -def compare_acls(local_path, share_path): - local_acl = call('filesystem.getacl', local_path) - local_acl.pop('path') - smb_acl = call('filesystem.getacl', share_path) - smb_acl.pop('path') - assert local_acl == smb_acl - - -def test_smb_mount(request, mount_share): - assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs' - - -def test_acl_share_root(request, mount_share): - compare_acls(mount_share['share']['path'], mount_share['mountpoint']) - - -def test_acl_share_subdir(request, mount_share): - call('filesystem.mkdir', { - 'path': os.path.join(mount_share['share']['path'], 'testdir'), - 'options': {'raise_chmod_error': False}, - }) - - compare_acls( - os.path.join(mount_share['share']['path'], 'testdir'), - os.path.join(mount_share['mountpoint'], 'testdir') - ) - - -def test_acl_share_file(request, mount_share): - ssh(f'touch {os.path.join(mount_share["share"]["path"], "testfile")}') - - compare_acls( - os.path.join(mount_share['share']['path'], 'testfile'), - os.path.join(mount_share['mountpoint'], 'testfile') - ) - - -@pytest.mark.parametrize('perm', PERMSET.keys()) -def test_acl_share_permissions(request, mount_share, perm): - assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs' - - SAMPLE_ENTRY['perms'] | {perm: True} - payload = { - 'path': mount_share['share']['path'], - 'dacl': [SAMPLE_ENTRY] + PERSISTENT_ACL - } - call('filesystem.setacl', payload, job=True) - compare_acls(mount_share['share']['path'], mount_share['mountpoint']) - - -@pytest.mark.parametrize('flagset', [ - { - 'FILE_INHERIT': True, - 'DIRECTORY_INHERIT': True, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': False, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': True, - 'DIRECTORY_INHERIT': False, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': False, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': False, - 'DIRECTORY_INHERIT': True, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': False, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': False, - 'DIRECTORY_INHERIT': False, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': False, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': True, - 'DIRECTORY_INHERIT': False, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': True, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': False, - 'DIRECTORY_INHERIT': True, - 'NO_PROPAGATE_INHERIT': False, - 'INHERIT_ONLY': True, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': True, - 'DIRECTORY_INHERIT': False, - 'NO_PROPAGATE_INHERIT': True, - 'INHERIT_ONLY': True, - 'INHERITED': False, - }, - { - 'FILE_INHERIT': 
False, - 'DIRECTORY_INHERIT': True, - 'NO_PROPAGATE_INHERIT': True, - 'INHERIT_ONLY': True, - 'INHERITED': False, - } -]) -def test_acl_share_flags(request, mount_share, flagset): - assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs' - - SAMPLE_ENTRY['flags'] = flagset - payload = { - 'path': mount_share['share']['path'], - 'dacl': [SAMPLE_ENTRY] + PERSISTENT_ACL - } - call('filesystem.setacl', payload, job=True) - compare_acls(mount_share['share']['path'], mount_share['mountpoint']) - - -def do_stream_ops(fname, samba_compat): - set_xattr_compat(samba_compat) - - assert list_stream(fname) == [] - - data_to_write = b'canary' - if samba_compat: - data_to_write += b'\x00' - - # test basic get / set - set_stream(fname, 'teststream', data_to_write) - - assert list_stream(fname) == ['teststream'] - - xat_data = get_stream(fname, 'teststream') - assert xat_data == data_to_write - - data_to_write = b'can' - if samba_compat: - data_to_write += b'\x00' - - # test that stream is appropriately truncated - set_stream(fname, 'teststream', data_to_write) - - xat_data = get_stream(fname, 'teststream') - assert xat_data == data_to_write - - # test that stream can be deleted - del_stream(fname, 'teststream') - - assert list_stream(fname) == [] - - -@pytest.mark.parametrize("is_dir", [True, False]) -@pytest.mark.parametrize("samba_compat", [True, False]) -def test_get_set_del_stream(request, mount_share, is_dir, samba_compat): - assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs' - if is_dir: - fname = os.path.join(mount_share['mountpoint'], 'testdirstream') - call('filesystem.mkdir', {'path': fname, 'options': {'raise_chmod_error': False}}) - cleanup = f'rmdir {fname}' - else: - fname = os.path.join(mount_share['mountpoint'], 'testfilestream') - ssh(f'touch {fname}') - cleanup = f'rm {fname}' - - try: - do_stream_ops(fname, samba_compat) - finally: - ssh(cleanup) diff --git a/tests/api2/test_smb_encryption.py b/tests/api2/test_smb_encryption.py deleted file mode 100644 index 61493cfbef063..0000000000000 --- a/tests/api2/test_smb_encryption.py +++ /dev/null @@ -1,133 +0,0 @@ -import os -import pytest - -from contextlib import contextmanager -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - -from protocols import smb_connection - -SHAREUSER = 'smbuser420' -PASSWD = 'abcd1234' -SMB_NAME = 'enc_share' - - -@pytest.fixture(scope='module') -def smb_setup(request): - with dataset('smb-encrypt', data={'share_type': 'SMB'}) as ds: - with user({ - 'username': SHAREUSER, - 'full_name': SHAREUSER, - 'group_create': True, - 'password': PASSWD - }, get_instance=False): - with smb_share(os.path.join('/mnt', ds), SMB_NAME) as s: - try: - call('service.start', 'cifs') - yield {'dataset': ds, 'share': s} - finally: - call('service.stop', 'cifs') - - -@contextmanager -def server_encryption(param): - call('smb.update', {'encryption': param}) - - try: - yield - finally: - call('smb.update', {'encryption': 'DEFAULT'}) - - -def test__smb_client_encrypt_default(smb_setup): - with smb_connection( - share=smb_setup['share']['name'], - username=SHAREUSER, - password=PASSWD, - encryption='DEFAULT' - ) as c: - # perform basic op to fully initialize SMB session - assert c.get_smb_encryption() == 'DEFAULT' - - c.ls('/') - smb_status = call('smb.status')[0] - - # check session - assert 
smb_status['encryption']['cipher'] == '-' - assert smb_status['encryption']['degree'] == 'none' - - # check share - assert smb_status['share_connections'][0]['encryption']['cipher'] == '-' - assert smb_status['share_connections'][0]['encryption']['degree'] == 'none' - - -def test__smb_client_encrypt_desired(smb_setup): - with smb_connection( - share=smb_setup['share']['name'], - username=SHAREUSER, - password=PASSWD, - encryption='DESIRED' - ) as c: - assert c.get_smb_encryption() == 'DESIRED' - - # perform basic op to fully initialize SMB session - c.ls('/') - smb_status = call('smb.status')[0] - - # check session - assert smb_status['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['encryption']['degree'] == 'partial' - - # check share - assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['share_connections'][0]['encryption']['degree'] == 'full' - - -def test__smb_client_encrypt_required(smb_setup): - with smb_connection( - share=smb_setup['share']['name'], - username=SHAREUSER, - password=PASSWD, - encryption='REQUIRED' - ) as c: - assert c.get_smb_encryption() == 'REQUIRED' - - # perform basic op to fully initialize SMB session - c.ls('/') - smb_status = call('smb.status')[0] - - # check session - assert smb_status['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['encryption']['degree'] == 'partial' - - # check share - assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['share_connections'][0]['encryption']['degree'] == 'full' - - -@pytest.mark.parametrize('enc_param', ('DESIRED', 'REQUIRED')) -def test__smb_client_server_encrypt(smb_setup, enc_param): - with server_encryption(enc_param): - with smb_connection( - share=smb_setup['share']['name'], - username=SHAREUSER, - password=PASSWD, - encryption='DEFAULT' - ) as c: - # check that client credential desired encryption is - # set to expected value - assert c.get_smb_encryption() == 'DEFAULT' - - # perform basic op to fully initialize SMB session - c.ls('/') - smb_status = call('smb.status')[0] - - # check session - assert smb_status['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['encryption']['degree'] == 'full' - - # check share - assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM' - assert smb_status['share_connections'][0]['encryption']['degree'] == 'full' diff --git a/tests/api2/test_smb_groupmap.py b/tests/api2/test_smb_groupmap.py deleted file mode 100644 index e914fa933753c..0000000000000 --- a/tests/api2/test_smb_groupmap.py +++ /dev/null @@ -1,73 +0,0 @@ -import pytest - -from middlewared.test.integration.utils import call -from middlewared.test.integration.assets.account import group - -BASE_RID_GROUP = 200000 - - -@pytest.mark.parametrize('groupname,expected_memberof,expected_rid', [ - ('builtin_administrators', 'S-1-5-32-544', 512), - ('builtin_guests', 'S-1-5-32-546', 514) -]) -def test__local_builtin_accounts(groupname, expected_memberof, expected_rid): - entry = call('group.query', [['group', '=', groupname]], {'get': True}) - rid = int(entry['sid'].split('-')[-1]) - assert rid == expected_rid - - groupmap = call('smb.groupmap_list') - assert str(entry['gid']) in groupmap['local_builtins'] - assert groupmap['local_builtins'][str(entry['gid'])]['sid'] == entry['sid'] - - members = call('smb.groupmap_listmem', expected_memberof) - assert entry['sid'] in members - - -def test__local_builtin_users_account(): - entry = call('group.query', [['group', '=', 
'builtin_users']], {'get': True}) - - rid = int(entry['sid'].split('-')[-1]) - assert rid == entry['id'] + BASE_RID_GROUP - - members_dom_users = call('smb.groupmap_listmem', 'S-1-5-32-545') - assert entry['sid'] in members_dom_users - - -def test__new_group(): - with group({"name": "group1"}) as g: - # Validate GID is being assigned as expected - assert g['sid'] is not None - rid = int(g['sid'].split('-')[-1]) - assert rid == g['id'] + BASE_RID_GROUP - - groupmap = call('smb.groupmap_list') - - assert groupmap['local'][str(g['gid'])]['sid'] == g['sid'] - - # Validate that disabling SMB removes SID value from query results - call('group.update', g['id'], {'smb': False}) - - new = call('group.get_instance', g['id']) - assert new['sid'] is None - - # Check for presence in group_mapping.tdb - groupmap = call('smb.groupmap_list') - assert new['gid'] not in groupmap['local'] - - # Validate that re-enabling restores SID value - call('group.update', g['id'], {'smb': True}) - new = call('group.get_instance', g['id']) - assert new['sid'] == g['sid'] - - groupmap = call('smb.groupmap_list') - assert str(new['gid']) in groupmap['local'] - - -@pytest.mark.parametrize('name,gid,sid', [ - ('Administrators', 90000001, 'S-1-5-32-544'), - ('Users', 90000002, 'S-1-5-32-545'), - ('Guests', 90000003, 'S-1-5-32-546') -]) -def test__builtins(name, gid, sid): - builtins = call('smb.groupmap_list')['builtins'] - assert str(gid) in builtins diff --git a/tests/api2/test_smb_share_crud_roles.py b/tests/api2/test_smb_share_crud_roles.py deleted file mode 100644 index 4a43998b8c88e..0000000000000 --- a/tests/api2/test_smb_share_crud_roles.py +++ /dev/null @@ -1,78 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors -from middlewared.test.integration.assets.account import unprivileged_user_client -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.roles import common_checks -from middlewared.test.integration.assets.smb import smb_share -from middlewared.test.integration.utils import call - - -@pytest.fixture(scope='module') -def create_dataset(): - with dataset('smb_roles_test') as ds: - yield ds - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_SMB_READ"]) -def test_read_role_can_read(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "sharing.smb.query", role, True, valid_role_exception=False) - - -@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_SMB_READ"]) -def test_read_role_cant_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "sharing.smb.create", role, False) - common_checks(unprivileged_user_fixture, "sharing.smb.update", role, False) - common_checks(unprivileged_user_fixture, "sharing.smb.delete", role, False) - - common_checks(unprivileged_user_fixture, "sharing.smb.getacl", role, True) - common_checks(unprivileged_user_fixture, "sharing.smb.setacl", role, False) - common_checks(unprivileged_user_fixture, "smb.status", role, False) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"]) -def test_write_role_can_write(unprivileged_user_fixture, role): - common_checks(unprivileged_user_fixture, "sharing.smb.create", role, True) - common_checks(unprivileged_user_fixture, "sharing.smb.update", role, True) - common_checks(unprivileged_user_fixture, "sharing.smb.delete", role, True) - - common_checks(unprivileged_user_fixture, "sharing.smb.getacl", role, True) - common_checks(unprivileged_user_fixture, "sharing.smb.setacl", 
role, True) - common_checks(unprivileged_user_fixture, "smb.status", role, True, valid_role_exception=False) - - common_checks( - unprivileged_user_fixture, "service.start", role, True, method_args=["cifs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.restart", role, True, method_args=["cifs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.reload", role, True, method_args=["cifs"], valid_role_exception=False - ) - common_checks( - unprivileged_user_fixture, "service.stop", role, True, method_args=["cifs"], valid_role_exception=False - ) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"]) -def test_auxsmbconf_rejected_create(create_dataset, role): - share = None - with unprivileged_user_client(roles=[role]) as c: - with pytest.raises(ValidationErrors) as ve: - try: - share = c.call('sharing.smb.create', { - 'name': 'FAIL', - 'path': f'/mnt/{create_dataset}', - 'auxsmbconf': 'test:param = CANARY' - }) - finally: - if share: - call('sharing.smb.delete', share['id']) - - -@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"]) -def test_auxsmbconf_rejected_update(create_dataset, role): - with smb_share(f'/mnt/{create_dataset}', 'FAIL') as share: - with unprivileged_user_client(roles=[role]) as c: - with pytest.raises(ValidationErrors): - c.call('sharing.smb.update', share['id'], {'auxsmbconf': 'test:param = Bob'}) diff --git a/tests/api2/test_snapshot_query.py b/tests/api2/test_snapshot_query.py deleted file mode 100644 index a1245b5371e3f..0000000000000 --- a/tests/api2/test_snapshot_query.py +++ /dev/null @@ -1,60 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.pool import dataset, pool, another_pool -from middlewared.test.integration.utils import call - - - -@pytest.fixture(scope="module") -def fixture1(): - with another_pool(): - with dataset("test"): - with dataset("test/test1"): - with dataset("test/test2"): - with dataset("test", pool="test"): - with dataset("test/test1", pool="test"): - with dataset("test/test2", pool="test"): - call( - "zfs.snapshot.create", - {"dataset": f"{pool}/test", "name": "snap-1", "recursive": True}, - ) - call( - "zfs.snapshot.create", - {"dataset": f"{pool}/test", "name": "snap-2", "recursive": True}, - ) - call( - "zfs.snapshot.create", - {"dataset": "test/test", "name": "snap-1", "recursive": True}, - ) - call( - "zfs.snapshot.create", - {"dataset": "test/test", "name": "snap-2", "recursive": True}, - ) - yield - - -def test_query_all_names(fixture1): - names = { - snapshot["name"] - for snapshot in call("zfs.snapshot.query", [], {"select": ["name"]}) - } - assert {f"{pool}/test@snap-1", f"{pool}/test@snap-2", f"{pool}/test/test1@snap-1", f"{pool}/test/test1@snap-2", - f"{pool}/test/test2@snap-1", f"{pool}/test/test2@snap-2", - f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1", f"test/test/test1@snap-2", - f"test/test/test2@snap-1", f"test/test/test2@snap-2"}.issubset(names) - - -@pytest.mark.parametrize("filters,names", [ - ([["pool", "=", "test"]], {f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1", - f"test/test/test1@snap-2", f"test/test/test2@snap-1", f"test/test/test2@snap-2"}), - ([["dataset", "=", f"{pool}/test"]], {f"{pool}/test@snap-1", f"{pool}/test@snap-2"}), - ([["dataset", "in", [f"{pool}/test/test1", "test/test/test2"]]], {f"{pool}/test/test1@snap-1", - f"{pool}/test/test1@snap-2", - f"test/test/test2@snap-1", - f"test/test/test2@snap-2"}), -]) -def 
test_query_names_by_pool_or_dataset(fixture1, filters, names): - assert { - snapshot["name"] - for snapshot in call("zfs.snapshot.query", filters, {"select": ["name"]}) - } == names diff --git a/tests/api2/test_snapshot_task.py b/tests/api2/test_snapshot_task.py deleted file mode 100644 index 210651f12b837..0000000000000 --- a/tests/api2/test_snapshot_task.py +++ /dev/null @@ -1,42 +0,0 @@ -import pytest - -from middlewared.service_exception import InstanceNotFound -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.snapshot_task import snapshot_task -from middlewared.test.integration.utils import call - -import sys -import os -apifolder = os.getcwd() -sys.path.append(apifolder) - - -def test_snapshot_task_is_not_deleted_when_deleting_a_child_dataset(): - with dataset("parent") as parent: - with dataset("parent/child") as child: - with snapshot_task({ - "dataset": parent, - "recursive": True, - "lifetime_value": 1, - "lifetime_unit": "DAY", - "naming_schema": "%Y%m%d%H%M", - }) as t: - call("pool.dataset.delete", child) - - assert call("pool.snapshottask.get_instance", t["id"]) - - -def test_snapshot_task_is_deleted_when_deleting_a_parent_dataset(): - with dataset("parent") as parent: - with dataset("parent/child") as child: - with snapshot_task({ - "dataset": child, - "recursive": True, - "lifetime_value": 1, - "lifetime_unit": "DAY", - "naming_schema": "%Y%m%d%H%M", - }) as t: - call("pool.dataset.delete", parent, {"recursive": True}) - - with pytest.raises(InstanceNotFound): - assert call("pool.snapshottask.get_instance", t["id"]) diff --git a/tests/api2/test_snapshot_task_retention.py b/tests/api2/test_snapshot_task_retention.py deleted file mode 100644 index 72dec238e940d..0000000000000 --- a/tests/api2/test_snapshot_task_retention.py +++ /dev/null @@ -1,112 +0,0 @@ -from datetime import datetime -from unittest.mock import ANY - -import pytz - -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.assets.snapshot_task import snapshot_task -from middlewared.test.integration.utils import assert_creates_job, call - - -def test_change_retention(): - tz = pytz.timezone(call("system.info")["timezone"]) - - with dataset("snapshottask-retention-test") as ds: - call("zettarepl.load_removal_dates") - - with snapshot_task({ - "dataset": ds, - "recursive": True, - "exclude": [], - "lifetime_value": 10, - "lifetime_unit": "YEAR", - "naming_schema": "auto-%Y-%m-%d-%H-%M-1y", - "schedule": { - "minute": "*", - }, - }) as task: - call("zfs.snapshot.create", { - "dataset": ds, - "name": "auto-2021-04-12-06-30-1y", - }) - - result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]], - {"get": True, "extra": {"retention": True}}) - assert result["retention"] == { - "datetime": ANY, - "source": "periodic_snapshot_task", - "periodic_snapshot_task_id": task["id"], - } - assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30)) - - result = call("pool.snapshottask.update_will_change_retention_for", task["id"], { - "naming_schema": "auto-%Y-%m-%d-%H-%M-365d", - }) - assert result == { - ds: ["auto-2021-04-12-06-30-1y"], - } - - with assert_creates_job("pool.snapshottask.fixate_removal_date") as job: - call("pool.snapshottask.update", task["id"], { - "naming_schema": "auto-%Y-%m-%d-%H-%M-365d", - "fixate_removal_date": True, - }) - - call("core.job_wait", job.id, job=True) - - result = call("zfs.snapshot.query", [["id", "=", 
f"{ds}@auto-2021-04-12-06-30-1y"]], - {"get": True, "extra": {"retention": True}}) - properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")] - assert properties, result["properties"] - assert properties[0]["value"] == "2031-04-10T06:30:00" - assert result["retention"] == { - "datetime": ANY, - "source": "property", - } - assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30)) - - -def test_delete_retention(): - tz = pytz.timezone(call("system.info")["timezone"]) - - with dataset("snapshottask-retention-test-2") as ds: - call("zettarepl.load_removal_dates") - - with snapshot_task({ - "dataset": ds, - "recursive": True, - "exclude": [], - "lifetime_value": 10, - "lifetime_unit": "YEAR", - "naming_schema": "auto-%Y-%m-%d-%H-%M-1y", - "schedule": { - "minute": "*", - }, - }) as task: - call("zfs.snapshot.create", { - "dataset": ds, - "name": "auto-2021-04-12-06-30-1y", - }) - - result = call("pool.snapshottask.delete_will_change_retention_for", task["id"]) - assert result == { - ds: ["auto-2021-04-12-06-30-1y"], - } - - with assert_creates_job("pool.snapshottask.fixate_removal_date") as job: - call("pool.snapshottask.delete", task["id"], { - "fixate_removal_date": True, - }) - - call("core.job_wait", job.id, job=True) - - result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]], - {"get": True, "extra": {"retention": True}}) - properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")] - assert properties, result["properties"] - assert properties[0]["value"] == "2031-04-10T06:30:00" - assert result["retention"] == { - "datetime": ANY, - "source": "property", - } - assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30)) diff --git a/tests/api2/test_snapshots.py b/tests/api2/test_snapshots.py deleted file mode 100644 index 4cff87b341bed..0000000000000 --- a/tests/api2/test_snapshots.py +++ /dev/null @@ -1,56 +0,0 @@ -import errno - -import pytest - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.pool import dataset -from middlewared.test.integration.utils import call - - -def common_min_max_txg_snapshot_test(test_min_txg=False, test_max_txg=False): - assert all(i is False for i in (test_min_txg, test_max_txg)) is False - - with dataset('test') as test_dataset: - created_snaps = [] - total_snaps = 20 - for i in range(total_snaps): - created_snaps.append(int(call( - 'zfs.snapshot.create', {'dataset': test_dataset, 'name': f'snap_{i}'} - )['properties']['createtxg']['value'])) - - assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True}) == len(created_snaps) - - for i in range(total_snaps // 2 - 1): - new_list = created_snaps - extra_args = {} - if test_min_txg: - new_list = created_snaps[i:] - extra_args['min_txg'] = new_list[0] - if test_max_txg: - new_list = new_list[:len(new_list) // 2] - extra_args['max_txg'] = new_list[-1] - - assert call( - 'zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True, 'extra': extra_args} - ) == len(new_list) - - -def test_min_txg_snapshot_query(): - common_min_max_txg_snapshot_test(True, False) - - -def test_max_txg_snapshot_query(): - common_min_max_txg_snapshot_test(False, True) - - -def test_min_max_txg_snapshot_query(): - common_min_max_txg_snapshot_test(True, True) - - -def test_already_exists(): - with dataset('test') as test_dataset: - call('zfs.snapshot.create', 
{'dataset': test_dataset, 'name': 'snap'}) - with pytest.raises(CallError) as ve: - call('zfs.snapshot.create', {'dataset': test_dataset, 'name': 'snap'}) - - assert ve.value.errno == errno.EEXIST diff --git a/tests/api2/test_snmp_agent.py b/tests/api2/test_snmp_agent.py deleted file mode 100644 index 40a67dc2addf1..0000000000000 --- a/tests/api2/test_snmp_agent.py +++ /dev/null @@ -1,38 +0,0 @@ -import re -import subprocess -import tempfile -import time - -import pytest - -from middlewared.test.integration.utils import call, host, ssh - - -@pytest.fixture() -def snmpd_running(): - call("service.start", "snmp") - time.sleep(2) - yield - - -def test_truenas_mib_elements(snmpd_running): - mib_file = "/usr/local/share/snmp/mibs/TRUENAS-MIB.txt" - with tempfile.NamedTemporaryFile(mode='w') as f: - lines = ssh(f'cat {mib_file}') - assert lines - - f.writelines(lines) - f.flush() - - snmp = subprocess.run( - f"snmpwalk -v2c -c public -m {f.name} {host().ip} " - "1.3.6.1.4.1.50536", - shell=True, - capture_output=True, - text=True, - ) - assert snmp.returncode == 0, snmp.stderr - assert "TRUENAS-MIB::zpoolName.1 = STRING: boot-pool\n" in snmp.stdout - assert re.search( - r"^TRUENAS-MIB::zfsArcSize\.0 = Gauge32: ([1-9][0-9]+)\n", snmp.stdout, re.MULTILINE - ), snmp.stdout diff --git a/tests/api2/test_system_advanced.py b/tests/api2/test_system_advanced.py deleted file mode 100644 index 3f0351dd0df26..0000000000000 --- a/tests/api2/test_system_advanced.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from middlewared.service_exception import ValidationErrors, ValidationError -from middlewared.test.integration.utils import call, ssh - - -@pytest.mark.parametrize( - 'key,value,grep_file,sshd_config_cmd,validation_error', [ - ('motd', 'TrueNAS Message Of The Day', '/etc/motd', None, ''), - ('login_banner', 'TrueNAS Login Banner', '/etc/login_banner', 'grep Banner /etc/ssh/sshd_config', ''), - ('kernel_extra_options', 'zfs_arc_min=21474836480', None, None, ''), - ('kernel_extra_options', '', None, None, ''), - ('kernel_extra_options', 'zfs_arc_min=<21474836480>', None, None, 'Invalid syntax'), - ], - ids=[ - 'Test MOTD', - 'Test Login Banner', - 'Test Valid Kernel Extra Options 1', - 'Test Valid Kernel Extra Options 2', - 'Test Invalid Kernel Extra Options 1', - ], -) -def test_(key, value, grep_file, sshd_config_cmd, validation_error): - if not validation_error: - call('system.advanced.update', {key: value}) - assert call('system.advanced.config')[key] == value - if grep_file is not None: - assert ssh(f'grep "{value}" {grep_file}', complete_response=True)['result'] - if sshd_config_cmd is not None: - assert ssh(sshd_config_cmd, complete_response=True)['result'] - else: - with pytest.raises(ValidationErrors) as ve: - call('system.advanced.update', {key: value}) - assert ve.value.errors == [ValidationError(key, validation_error)] diff --git a/tests/api2/test_system_dataset.py b/tests/api2/test_system_dataset.py deleted file mode 100644 index 46c49778fefb7..0000000000000 --- a/tests/api2/test_system_dataset.py +++ /dev/null @@ -1,79 +0,0 @@ -import errno -import os -import pytest - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.pool import another_pool -from middlewared.test.integration.utils import call, pool - - -PASSPHRASE = 'passphrase' - - -@pytest.fixture(scope="module") -def passphrase_encrypted_pool_session(): - with another_pool({"encryption": True, "encryption_options": {"passphrase": PASSPHRASE}}) as p: - yield p["name"] - - 
-@pytest.fixture(scope="function") -def passphrase_encrypted_pool(passphrase_encrypted_pool_session): - config = call("systemdataset.config") - assert config["pool"] == pool - - try: - call("pool.dataset.delete", passphrase_encrypted_pool_session, {"recursive": True}) - except CallError as e: - if e.errno != errno.ENOENT: - raise - - # If root dataset is locked, let's unlock it here - # It can be locked if some test locks it but does not unlock it later on and we should have - # a clean slate whenever we are trying to test using this pool/root dataset - if call("pool.dataset.get_instance", passphrase_encrypted_pool_session)["locked"]: - call("pool.dataset.unlock", passphrase_encrypted_pool_session, { - "datasets": [{"name": passphrase_encrypted_pool_session, "passphrase": PASSPHRASE}], - }) - - yield passphrase_encrypted_pool_session - - -@pytest.mark.parametrize("lock", [False, True]) -def test_migrate_to_a_pool_with_passphrase_encrypted_root_dataset(passphrase_encrypted_pool, lock): - if lock: - call("pool.dataset.lock", passphrase_encrypted_pool, job=True) - - assert passphrase_encrypted_pool in call("systemdataset.pool_choices") - - call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True) - - ds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system") - assert ds["properties"]["encryption"]["value"] == "off" - - call("systemdataset.update", {"pool": pool}, job=True) - - -def test_lock_passphrase_encrypted_pool_with_system_dataset(passphrase_encrypted_pool): - call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True) - - call("pool.dataset.lock", passphrase_encrypted_pool, job=True) - - ds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system") - assert ds["properties"]["mounted"]["value"] == "yes" - - call("systemdataset.update", {"pool": pool}, job=True) - - -def test_system_dataset_mountpoints(): - system_config = call("systemdataset.config") - for system_dataset_spec in call( - "systemdataset.get_system_dataset_spec", system_config["pool"], system_config["uuid"] - ): - mount_point = system_dataset_spec.get("mountpoint") or os.path.join( - system_config["path"], os.path.basename(system_dataset_spec["name"]) - ) - - ds_stats = call("filesystem.stat", mount_point) - assert ds_stats["uid"] == system_dataset_spec["chown_config"]["uid"] - assert ds_stats["gid"] == system_dataset_spec["chown_config"]["gid"] - assert ds_stats["mode"] & 0o777 == system_dataset_spec["chown_config"]["mode"] diff --git a/tests/api2/test_system_general_ui_allowlist.py b/tests/api2/test_system_general_ui_allowlist.py deleted file mode 100644 index b24a01b9e9268..0000000000000 --- a/tests/api2/test_system_general_ui_allowlist.py +++ /dev/null @@ -1,84 +0,0 @@ -import socket -import time - -import requests -import websocket - -from middlewared.test.integration.utils import call, host, mock, ssh, url, websocket_url - - -def test_system_general_ui_allowlist(): - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect((host().ip, 1)) # connect() for UDP doesn't send packets - local_ip = s.getsockname()[0] - - with mock("vm.query", return_value=[ - {"id": 1, "name": ""}, - ]): - with mock("vm.device.query", return_value=[ - {"id": 1, "attributes": {"bind": "127.0.0.1", "port": 1, "web_port": 1}, "vm": 1} - ]): - try: - protected_endpoints = ( - "/_download", - "/_upload", - "/_plugins", - "/api/docs", - "/api/v2.0", - "/progress", - "/vm/display/1", - ) - protected_ws_endpoints = ( - ("/websocket", '{"msg": "connect", "version": "1"}'), 
- ("/websocket/shell", '{"token": "invalid"}'), - ) - - # Ensure we are testing endpoints that do not give 403 by default - for endpoint in protected_endpoints: - r = requests.get(url() + endpoint, timeout=10) - assert r.status_code != 403 - for endpoint, message in protected_ws_endpoints: - ws = websocket.create_connection(websocket_url() + endpoint) - ws.send(message) - resp_opcode, msg = ws.recv_data() - assert resp_opcode == 1, msg - - # Set `ui_allowlist` to IP we are using - call("system.general.update", {"ui_allowlist": [local_ip]}) - call("system.general.ui_restart", 0) - time.sleep(10) - - # Check everything still works - for endpoint in protected_endpoints: - r = requests.get(url() + endpoint, timeout=10) - assert r.status_code != 403 - for endpoint, message in protected_ws_endpoints: - ws = websocket.create_connection(websocket_url() + endpoint) - ws.send(message) - resp_opcode, msg = ws.recv_data() - assert resp_opcode == 1, msg - - # Set it to an invalid IP - call("system.general.update", {"ui_allowlist": ["8.8.8.8"]}) - call("system.general.ui_restart", 0) - time.sleep(10) - - # Ensure we are still able to open the UI - r = requests.get(url(), timeout=10) - assert r.status_code == 200 - - # Ensure that we can't access API - for endpoint in protected_endpoints: - r = requests.get(url() + endpoint, timeout=10) - assert r.status_code == 403, (endpoint, r.text) - for endpoint, message in protected_ws_endpoints: - ws = websocket.create_connection(websocket_url() + endpoint) - ws.send(message) - resp_opcode, msg = ws.recv_data() - assert resp_opcode == 8, msg - assert msg[2:].decode("utf-8") == "You are not allowed to access this resource" - finally: - # We are not allowed to access API, bring things back to normal via SSH - ssh("midclt call system.general.update '{\"ui_allowlist\": []}'") - ssh("midclt call system.general.ui_restart 0") - time.sleep(10) diff --git a/tests/api2/test_system_general_ui_rollback.py b/tests/api2/test_system_general_ui_rollback.py deleted file mode 100644 index 98b80cf7155d3..0000000000000 --- a/tests/api2/test_system_general_ui_rollback.py +++ /dev/null @@ -1,94 +0,0 @@ -import time -from contextlib import contextmanager - -from middlewared.test.integration.utils import call, client, ssh -from middlewared.test.integration.utils.client import truenas_server - -ROLLBACK = 20 -UI_DELAY = 3 -ORIG_PORT = 80 -NEW_PORT = 81 - - -def fallback_ui_fix(): - """Fix the UI port settings using SSH in case an - unexpected failure is met or we just want to reset - our changes""" - ssh(f"midclt call system.general.update '{{\"ui_port\": {ORIG_PORT}}}'") - ssh("midclt call system.general.ui_restart 0") - time.sleep(5) - - -@contextmanager -def client_with_timeout(host_ip=None, tries=30): - for _ in range(tries): - try: - with client(host_ip=host_ip) as c: - assert c.call("core.ping") == "pong" - yield c - break - except ConnectionRefusedError: - time.sleep(1) - else: - assert False, "Could not connect to client." - - -def test_system_general_ui_rollback(): - """This tests the following: - 1. change the port the nginx service binds to (our UI) - 2. ensure communication with the API on the original port failsinal port fails - 3. ensure communication with the API on the new port succeeds - 4. check the time left before the changes are rolled back - 5. sleep that amount of time (plus a few seconds for a buffer) - 6. ensure communication with the API on the original port succeeds - 7. 
if any above steps fail, revert the UI port settings via ssh""" - try: - # Step 1 - call( - "system.general.update", - {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY} - ) - - # Step 2 - try: - assert call("core.ping") != "pong" - except Exception: - pass - - # Step 3 - with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c: - rollback_left = c.call("system.general.checkin_waiting") - # Step 4 - assert rollback_left < ROLLBACK - - # Step 5 - time.sleep(rollback_left + 5) - # Step 6 - assert call("core.ping") == "pong" - except Exception: - # Step 7 - fallback_ui_fix() - raise - - -def test_system_general_ui_checkin(): - """This tests the following: - 1. change the port the nginx service binds to (our UI) - 2. immediately checkin the UI port changes - 3. ensure we don't have a checkin pending - 4. revert any UI port settings via ssh""" - try: - # Step 1 - call( - "system.general.update", - {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY} - ) - - # Step 2 - with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c: - # Step 3 - c.call("system.general.checkin") - # Step 4 - assert c.call("system.general.checkin_waiting") is None - finally: - fallback_ui_fix() diff --git a/tests/api2/test_system_settings_roles.py b/tests/api2/test_system_settings_roles.py deleted file mode 100644 index c7a6317be158e..0000000000000 --- a/tests/api2/test_system_settings_roles.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -@pytest.mark.parametrize('role,endpoint,payload,should_work,valid_role_exception,is_return_type_none', [ - ('SYSTEM_GENERAL_READ', 'system.general.config', [], True, False, False), - ('READONLY_ADMIN', 'system.general.update', [{}], False, False, False), - ('SYSTEM_GENERAL_WRITE', 'system.general.update', [{}], True, False, False), - ('SYSTEM_ADVANCED_READ', 'system.advanced.config', [], True, False, False), - ('READONLY_ADMIN', 'system.advanced.update', [{}], False, False, False), - ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update', [{}], True, False, False), - ('SYSTEM_ADVANCED_READ', 'system.advanced.sed_global_password', [], True, False, False), - ('READONLY_ADMIN', 'system.advanced.update_gpu_pci_ids', [[]], False, False, False), - ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update_gpu_pci_ids', [], True, False, True), - ('SYSTEM_GENERAL_READ', 'system.general.local_url', [], True, False, False), -]) -def test_system_settings_read_and_write_role( - unprivileged_user_fixture, role, endpoint, payload, should_work, valid_role_exception, is_return_type_none -): - common_checks( - unprivileged_user_fixture, endpoint, role, should_work, is_return_type_none=is_return_type_none, - valid_role_exception=valid_role_exception, method_args=payload - ) diff --git a/tests/api2/test_system_vendor.py b/tests/api2/test_system_vendor.py deleted file mode 100644 index 2d8c87759da11..0000000000000 --- a/tests/api2/test_system_vendor.py +++ /dev/null @@ -1,15 +0,0 @@ -from middlewared.test.integration.utils import call, ssh - - -SENTINEL_FILE_PATH = "/data/.vendor" - - -def test_no_vendor_file(): - file_exists = ssh(f"test -e {SENTINEL_FILE_PATH}", check=False, complete_response=True)["result"] - assert not file_exists - assert not call("system.vendor.is_vendored") - - -def test_name_is_none(): - vendor_name = call("system.vendor.name") - assert vendor_name is None diff --git a/tests/api2/test_truecommand_roles.py 
b/tests/api2/test_truecommand_roles.py deleted file mode 100644 index d04c873654d2f..0000000000000 --- a/tests/api2/test_truecommand_roles.py +++ /dev/null @@ -1,23 +0,0 @@ -import pytest - -from middlewared.test.integration.assets.roles import common_checks - - -def test_truecommand_readonly_role(unprivileged_user_fixture): - common_checks( - unprivileged_user_fixture, 'truenas.managed_by_truecommand', 'READONLY_ADMIN', True, valid_role_exception=False - ) - - -@pytest.mark.parametrize('endpoint,role,should_work,valid_role_exception', [ - ('truecommand.config', 'TRUECOMMAND_READ', True, False), - ('truecommand.config', 'TRUECOMMAND_WRITE', True, False), - ('truecommand.info', 'TRUECOMMAND_READ', True, False), - ('truecommand.info', 'TRUECOMMAND_WRITE', True, False), - ('truecommand.update', 'TRUECOMMAND_READ', False, True), - ('truecommand.update', 'TRUECOMMAND_WRITE', True, True), -]) -def test_truecommand_read_and_write_role(unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception): - common_checks( - unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception=valid_role_exception - ) diff --git a/tests/api2/test_tunables.py b/tests/api2/test_tunables.py deleted file mode 100644 index 2b57348139a0c..0000000000000 --- a/tests/api2/test_tunables.py +++ /dev/null @@ -1,164 +0,0 @@ -import pytest - -from truenas_api_client import ValidationErrors -from middlewared.test.integration.utils import call, ssh -from middlewared.test.integration.utils.mock_binary import mock_binary - -SYSCTL = "kernel.watchdog" -SYSCTL_DEFAULT_VALUE = "1" -SYSCTL_NEW_VALUE = "0" - -ZFS = "zil_nocacheflush" -ZFS_DEFAULT_VALUE = "0" -ZFS_NEW_VALUE = "1" - - -def test_create_invalid_sysctl(): - with pytest.raises(ValidationErrors) as ve: - call("tunable.create", { - "type": "SYSCTL", - "var": "kernel.truenas", - "value": "1", - }, job=True) - - assert ve.value.errors[0].attribute == "tunable_create.var" - - -def test_create_invalid_udev(): - with pytest.raises(ValidationErrors) as ve: - call("tunable.create", { - "type": "UDEV", - "var": "61-truenas-pmem", - "value": "# disable built-in truenas rule to enable memory loss", - }, job=True) - - assert ve.value.errors[0].attribute == "tunable_create.var" - - -def test_create_invalid_zfs(): - with pytest.raises(ValidationErrors) as ve: - call("tunable.create", { - "type": "ZFS", - "var": "zfs_truenas", - "value": "1", - }, job=True) - - assert ve.value.errors[0].attribute == "tunable_create.var" - - -def test_sysctl_lifecycle(): - def assert_default_value(): - assert ssh("cat /etc/sysctl.d/tunables.conf", check=False) == f"" - assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_DEFAULT_VALUE}\n" - - def assert_new_value(): - assert ssh("cat /etc/sysctl.d/tunables.conf") == f"{SYSCTL}={SYSCTL_NEW_VALUE}\n" - assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_NEW_VALUE}\n" - - assert_default_value() - - tunable = call("tunable.create", { - "type": "SYSCTL", - "var": SYSCTL, - "value": SYSCTL_NEW_VALUE, - }, job=True) - - assert_new_value() - - call("tunable.update", tunable["id"], { - "enabled": False, - }, job=True) - - assert_default_value() - - call("tunable.update", tunable["id"], { - "enabled": True, - }, job=True) - - assert_new_value() - - call("tunable.delete", tunable["id"], job=True) - - assert_default_value() - - -def test_udev_lifecycle(): - def assert_exists(): - assert ssh("cat /etc/udev/rules.d/10-disable-usb.rules") == f"BUS==\"usb\", OPTIONS+=\"ignore_device\"\n" - - def assert_does_not_exist(): - assert ssh("cat 
/etc/udev/rules.d/10-disable-usb.rules", check=False) == f"" - - tunable = call("tunable.create", { - "type": "UDEV", - "var": "10-disable-usb", - "value": "BUS==\"usb\", OPTIONS+=\"ignore_device\"" - }, job=True) - - assert_exists() - - call("tunable.update", tunable["id"], { - "enabled": False, - }, job=True) - - assert_does_not_exist() - - call("tunable.update", tunable["id"], { - "enabled": True, - }, job=True) - - assert_exists() - - call("tunable.delete", tunable["id"], job=True) - - assert_does_not_exist() - - -def test_zfs_lifecycle(): - with mock_binary("/usr/sbin/update-initramfs", exitcode=0): - def assert_default_value(): - assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == f"" - assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_DEFAULT_VALUE}\n" - - def assert_new_value(): - assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == f"options zfs {ZFS}={ZFS_NEW_VALUE}\n" - assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_NEW_VALUE}\n" - - assert_default_value() - - tunable = call("tunable.create", { - "type": "ZFS", - "var": ZFS, - "value": ZFS_NEW_VALUE, - }, job=True) - - assert_new_value() - - call("tunable.update", tunable["id"], { - "enabled": False, - }, job=True) - - assert_default_value() - - call("tunable.update", tunable["id"], { - "enabled": True, - }, job=True) - - assert_new_value() - - call("tunable.delete", tunable["id"], job=True) - - assert_default_value() - - -def test_arc_max_set(): - tunable = call("tunable.create", {"type": "ZFS", "var": "zfs_arc_max", "value": 8675309}, job=True) - try: - val = ssh("cat /sys/module/zfs/parameters/zfs_arc_max") - finally: - call("tunable.delete", tunable["id"], job=True) - - assert int(val.strip()) == 8675309 - - mount_info = call("filesystem.mount_info", [["mountpoint", "=", "/"]], {"get": True}) - assert "RO" in mount_info["super_opts"] diff --git a/tests/api2/test_twofactor_auth.py b/tests/api2/test_twofactor_auth.py deleted file mode 100644 index 70a9486dcffae..0000000000000 --- a/tests/api2/test_twofactor_auth.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python3 -import contextlib -import errno -import os -import sys -import pytest - -apifolder = os.getcwd() -sys.path.append(apifolder) - -from middlewared.service_exception import CallError -from middlewared.test.integration.assets.account import user as user_create -from middlewared.test.integration.assets.two_factor_auth import enabled_twofactor_auth, get_user_secret, get_2fa_totp_token -from middlewared.test.integration.assets.account import unprivileged_user -from middlewared.test.integration.utils import call, client - - -TEST_USERNAME = 'test2fauser' -TEST_USERNAME_2 = 'test2fauser2' -TEST_PASSWORD = 'testpassword' -TEST_PASSWORD_2 = 'testpassword2' -TEST_GID = 544 -TEST_TWOFACTOR_INTERVAL = {'interval': 60} -USERS_2FA_CONF = { - TEST_USERNAME: {'interval': 30, 'otp_digits': 6}, - TEST_USERNAME_2: {'interval': 40, 'otp_digits': 7} -} - - -@contextlib.contextmanager -def user(data: dict): - data['group'] = call('group.query', [['gid', '=', TEST_GID]], {'get': True})['id'] - with user_create(data) as user_obj: - yield user_obj - - -def test_login_without_2fa(): - with user({ - 'username': TEST_USERNAME, - 'password': TEST_PASSWORD, - 'full_name': TEST_USERNAME, - }): - assert call('auth.get_login_user', TEST_USERNAME, TEST_PASSWORD) is not None - - -@pytest.mark.parametrize("user_name,password,renew_options", [ - ('test_user1', 'test_password1', {'interval': 30, 'otp_digits': 6}), - ('test_user2', 'test_password2', 
{'interval': 60, 'otp_digits': 7}), - ('test_user3', 'test_password3', {'interval': 50, 'otp_digits': 8}), -]) -def test_secret_generation_for_user(user_name, password, renew_options): - with user({ - 'username': user_name, - 'password': password, - 'full_name': user_name, - }) as user_obj: - assert get_user_secret(user_obj['id'], False) != [] - assert get_user_secret(user_obj['id'])['secret'] is None - - call('user.renew_2fa_secret', user_obj['username'], renew_options) - - user_secret_obj = get_user_secret(user_obj['id']) - assert user_secret_obj['secret'] is not None - for k in ('interval', 'otp_digits'): - assert user_secret_obj[k] == renew_options[k] - - -def test_secret_generation_for_multiple_users(): - with user({ - 'username': TEST_USERNAME, - 'password': TEST_PASSWORD, - 'full_name': TEST_USERNAME, - }) as first_user: - call('user.renew_2fa_secret', first_user['username'], USERS_2FA_CONF[first_user['username']]) - with user({ - 'username': TEST_USERNAME_2, - 'password': TEST_PASSWORD_2, - 'full_name': TEST_USERNAME_2, - }) as second_user: - call('user.renew_2fa_secret', second_user['username'], USERS_2FA_CONF[second_user['username']]) - for user_obj in (first_user, second_user): - user_secret_obj = get_user_secret(user_obj['id']) - assert user_secret_obj['secret'] is not None - for k in ('interval', 'otp_digits'): - assert user_secret_obj[k] == USERS_2FA_CONF[user_obj['username']][k] - - -def test_login_without_otp_for_user_without_2fa(): - with user({ - 'username': TEST_USERNAME_2, - 'password': TEST_PASSWORD_2, - 'full_name': TEST_USERNAME_2, - }): - with enabled_twofactor_auth(): - assert call('auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2) is not None - - -def test_login_with_otp_for_user_with_2fa(): - with user({ - 'username': TEST_USERNAME_2, - 'password': TEST_PASSWORD_2, - 'full_name': TEST_USERNAME_2, - }) as user_obj: - with enabled_twofactor_auth(): - call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL) - assert call( - 'auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2, - get_2fa_totp_token(get_user_secret(user_obj['id'])) - ) is not None - - -def test_user_2fa_secret_renewal(): - with user({ - 'username': TEST_USERNAME_2, - 'password': TEST_PASSWORD_2, - 'full_name': TEST_USERNAME_2, - }) as user_obj: - with enabled_twofactor_auth(): - call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL) - assert call( - 'auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2, - get_2fa_totp_token(get_user_secret(user_obj['id'])) - ) is not None - secret = get_user_secret(user_obj['id']) - - call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL) - call('user.get_instance', user_obj['id']) - assert get_user_secret(user_obj['id'])['secret'] != secret - - assert call( - 'auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2, - get_2fa_totp_token(get_user_secret(user_obj['id'])) - ) is not None - - -def test_restricted_user_2fa_secret_renewal(): - with unprivileged_user( - username=TEST_USERNAME, - group_name='TEST_2FA_GROUP', - privilege_name='TEST_2FA_PRIVILEGE', - allowlist=[], - web_shell=False, - roles=['READONLY_ADMIN'] - ) as acct: - with enabled_twofactor_auth(): - with client(auth=(acct.username, acct.password)) as c: - with pytest.raises(CallError) as ve: - # Trying to renew another user's 2fa token should fail - c.call('user.renew_2fa_secret', "root", TEST_TWOFACTOR_INTERVAL) - - assert ve.value.errno == errno.EPERM - - c.call('user.renew_2fa_secret', acct.username, TEST_TWOFACTOR_INTERVAL) - 
user_obj = call('user.query', [['username', '=', acct.username]], {'get': True}) - assert call( - 'auth.get_login_user', acct.username, acct.password, - get_2fa_totp_token(get_user_secret(user_obj['id'])) - ) is not None - secret = get_user_secret(user_obj['id']) - - c.call('user.renew_2fa_secret', acct.username, TEST_TWOFACTOR_INTERVAL) - assert get_user_secret(user_obj['id'])['secret'] != secret - - assert call( - 'auth.get_login_user', acct.username, acct.password, - get_2fa_totp_token(get_user_secret(user_obj['id'])) - ) is not None - - -def test_multiple_users_login_with_otp(): - with user({ - 'username': TEST_USERNAME, - 'password': TEST_PASSWORD, - 'full_name': TEST_USERNAME, - }) as first_user: - with enabled_twofactor_auth(): - assert call('auth.get_login_user', TEST_USERNAME, TEST_PASSWORD) is not None - - with user({ - 'username': TEST_USERNAME_2, - 'password': TEST_PASSWORD_2, - 'full_name': TEST_USERNAME_2, - }) as second_user: - call('user.renew_2fa_secret', second_user['username'], TEST_TWOFACTOR_INTERVAL) - assert call( - 'auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2, - get_2fa_totp_token(get_user_secret(second_user['id'])) - ) is not None - - assert call('auth.get_login_user', TEST_USERNAME_2, TEST_PASSWORD_2) is None - - call('user.renew_2fa_secret', first_user['username'], TEST_TWOFACTOR_INTERVAL) - - assert call( - 'auth.get_login_user', TEST_USERNAME, TEST_PASSWORD, - get_2fa_totp_token(get_user_secret(first_user['id'])) - ) is not None diff --git a/tests/api2/test_ui_caching.py b/tests/api2/test_ui_caching.py deleted file mode 100644 index dcd2ab94d25bb..0000000000000 --- a/tests/api2/test_ui_caching.py +++ /dev/null @@ -1,35 +0,0 @@ -import re - -import pytest -import requests - -from middlewared.test.integration.utils import url - -RE_MAIN_SCRIPT = re.compile(r'