Skip to content

Commit

Permalink
Upstream: SHA2 reworking and API for iterating
Browse files Browse the repository at this point in the history
over multiple implementations

The changes to the shared files that enable macOS support, to be submitted upstream as a PR.

Signed-off-by: Jorgen Lundman <[email protected]>
  • Loading branch information
lundman committed Oct 19, 2023
1 parent 8812f26 commit 802749c
Show file tree
Hide file tree
Showing 11 changed files with 89 additions and 26 deletions.
6 changes: 4 additions & 2 deletions include/os/macos/spl/sys/aarch64/asm_linkage.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,14 +51,16 @@
.balign ASM_ENTRY_ALIGN %% \
.globl _##x %% \
_##x: %% \
x:
x: \
bti c // hint #34

#define ENTRY_ALIGN(x, a) \
.text %% \
.balign a %% \
.globl _##x %% \
_##x: %% \
x:
x: \
bti c // hint #34

#define FUNCTION(x) \
x:
Expand Down
21 changes: 21 additions & 0 deletions lib/libspl/include/sys/simd.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,27 @@ static inline unsigned long getauxval(unsigned long key)
#define AT_HWCAP 16
#define AT_HWCAP2 26
extern unsigned long getauxval(unsigned long type);
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#define AT_HWCAP 0
/*
 * macOS emulation of the Linux getauxval(3) HWCAP query.
 *
 * The key argument is ignored: the caller in this file only uses
 * AT_HWCAP (defined as 0 above), so a single synthesized capability
 * word is returned regardless -- NOTE(review): if AT_HWCAP2 queries
 * are ever added, this must dispatch on key.
 *
 * FP is always reported (all Apple arm64 CPUs have it); SHA256 and
 * SHA512 support are probed via the hw.optional.arm.FEAT_* sysctls.
 * A sysctl that fails (e.g. the key does not exist on older macOS)
 * simply leaves the corresponding bit clear.
 */
static inline unsigned long getauxval(unsigned long key)
{
	(void) key;
	/* HWCAP_ are all defined halfway down this file */
	unsigned long val = 1 /* HWCAP_FP */;
	int intval;
	size_t intvallen;
	int err;

	intvallen = sizeof (intval);
	err = sysctlbyname("hw.optional.arm.FEAT_SHA256",
	    &intval, &intvallen, NULL, 0);
	if (err == 0 && intval != 0)
		val |= 0x00000040; /* SHA256 */

	/*
	 * Re-initialize the in/out length: sysctlbyname() updates
	 * *oldlenp, so reusing the value left by the previous call
	 * could under-size the buffer for this query.
	 */
	intvallen = sizeof (intval);
	err = sysctlbyname("hw.optional.arm.FEAT_SHA512",
	    &intval, &intvallen, NULL, 0);
	if (err == 0 && intval != 0)
		val |= 0x00200000; /* SHA512 */

	return (val);
}
#endif /* __linux__ */
#endif /* arm || aarch64 || powerpc */

Expand Down
4 changes: 3 additions & 1 deletion module/icp/algs/aes/aes_impl.c
Original file line number Diff line number Diff line change
Expand Up @@ -418,14 +418,16 @@ aes_impl_set(const char *val)
}

#if defined(_KERNEL)
#if defined(__linux__) || defined(__APPLE__)

#if defined(__linux__)
/*
 * Linux module-parameter setter for icp_aes_impl: forwards the
 * user-supplied implementation name to the generic aes_impl_set().
 * kp is unused.  Return value is whatever aes_impl_set() reports --
 * NOTE(review): its error convention is defined elsewhere in this
 * file, not visible in this hunk.
 */
static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
return (aes_impl_set(val));
}
#endif

#if defined(__linux__) || defined(__APPLE__)
static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
Expand Down
2 changes: 1 addition & 1 deletion module/icp/algs/blake3/blake3_impl.c
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,7 @@ blake3_param(ZFS_MODULE_PARAM_ARGS)
int err;

generic_impl_init();
if (req->newptr == NULL) {
if ((void *)req->newptr == NULL) {
const uint32_t impl = IMPL_READ(generic_impl_chosen);
const int init_buflen = 64;
const char *fmt;
Expand Down
21 changes: 17 additions & 4 deletions module/icp/algs/modes/gcm.c
Original file line number Diff line number Diff line change
Expand Up @@ -889,7 +889,13 @@ gcm_impl_init(void)
if (gcm_avx_will_work()) {
#ifdef HAVE_MOVBE
if (zfs_movbe_available() == B_TRUE) {
#ifdef __APPLE__
atomic_swap_32(
(volatile unsigned int *)&gcm_avx_can_use_movbe,
B_TRUE);
#else
atomic_swap_32(&gcm_avx_can_use_movbe, B_TRUE);
#endif
}
#endif
if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
Expand Down Expand Up @@ -992,13 +998,16 @@ gcm_impl_set(const char *val)
}

#if defined(_KERNEL)
#if defined(__linux__) || defined(__APPLE__)

#if defined(__linux__)
/*
 * Linux module-parameter setter for icp_gcm_impl: forwards the
 * user-supplied implementation name to the generic gcm_impl_set().
 * kp is unused.  Return value is whatever gcm_impl_set() reports --
 * NOTE(review): its error convention is defined elsewhere in this
 * file, not visible in this hunk.
 */
static int
icp_gcm_impl_set(const char *val, zfs_kernel_param_t *kp)
{
return (gcm_impl_set(val));
}
#endif

#if defined(__linux__) || defined(__APPLE__)
static int
icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
Expand Down Expand Up @@ -1120,7 +1129,11 @@ static inline void
/*
 * Enable or disable the AVX GCM code path, but only if the runtime
 * check gcm_avx_will_work() confirms the CPU supports it; otherwise
 * the request is silently ignored.
 */
gcm_set_avx(boolean_t val)
{
if (gcm_avx_will_work() == B_TRUE) {
#ifdef __APPLE__
/*
 * macOS declares atomic_swap_32() with a volatile unsigned int *
 * target; cast to match -- NOTE(review): assumes gcm_use_avx is
 * 32 bits wide and compatibly aligned, confirm its declaration.
 */
atomic_swap_32((volatile unsigned int *)&gcm_use_avx, val);
#else
atomic_swap_32(&gcm_use_avx, val);
#endif
}
}
}

Expand Down Expand Up @@ -1571,6 +1584,8 @@ gcm_init_avx(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len,
}

#if defined(_KERNEL)

#if defined(__linux__)
static int
icp_gcm_avx_set_chunk_size(const char *buf, zfs_kernel_param_t *kp)
{
Expand All @@ -1591,9 +1606,9 @@ icp_gcm_avx_set_chunk_size(const char *buf, zfs_kernel_param_t *kp)
error = param_set_uint(val_rounded, kp);
return (error);
}
#endif

#ifdef __APPLE__

/* Lives in here to have access to GCM macros */
int
param_icp_gcm_avx_set_chunk_size(ZFS_MODULE_PARAM_ARGS)
Expand Down Expand Up @@ -1622,10 +1637,8 @@ param_icp_gcm_avx_set_chunk_size(ZFS_MODULE_PARAM_ARGS)
gcm_avx_chunk_size = val;
return (rc);
}

#endif


module_param_call(icp_gcm_avx_chunk_size, icp_gcm_avx_set_chunk_size,
param_get_uint, &gcm_avx_chunk_size, 0644);

Expand Down
2 changes: 1 addition & 1 deletion module/icp/algs/sha2/sha256_impl.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ sha256_param(ZFS_MODULE_PARAM_ARGS)
int err;

generic_impl_init();
if (req->newptr == NULL) {
if ((void *)req->newptr == NULL) {
const uint32_t impl = IMPL_READ(generic_impl_chosen);
const int init_buflen = 64;
const char *fmt;
Expand Down
2 changes: 1 addition & 1 deletion module/icp/algs/sha2/sha512_impl.c
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ sha512_param(ZFS_MODULE_PARAM_ARGS)
int err;

generic_impl_init();
if (req->newptr == NULL) {
if ((void *)req->newptr == NULL) {
const uint32_t impl = IMPL_READ(generic_impl_chosen);
const int init_buflen = 64;
const char *fmt;
Expand Down
28 changes: 21 additions & 7 deletions module/icp/asm-aarch64/sha2/sha256-armv8.S
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,14 @@
* - modified assembly to fit into OpenZFS
*/

#include <sys/asm_linkage.h>

#if defined(__aarch64__)

.text
SECTION_TEXT

.align 6
.type .LK256,%object
.balign 64
SET_OBJ(.LK256)
.LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
Expand All @@ -43,13 +45,17 @@
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.size .LK256,.-.LK256
SET_SIZE(.LK256)

#ifdef __APPLE__
ENTRY_ALIGN(zfs_sha256_block_armv7, 64)
#else
.globl zfs_sha256_block_armv7
.type zfs_sha256_block_armv7,%function
.align 6
zfs_sha256_block_armv7:
hint #34 // bti c
#endif
stp x29,x30,[sp,#-128]!
add x29,sp,#0

Expand Down Expand Up @@ -1010,13 +1016,17 @@ zfs_sha256_block_armv7:
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
ret
.size zfs_sha256_block_armv7,.-zfs_sha256_block_armv7
SET_SIZE(zfs_sha256_block_armv7)

#ifdef __APPLE__
ENTRY_ALIGN(zfs_sha256_block_armv8, 64)
#else
.globl zfs_sha256_block_armv8
.type zfs_sha256_block_armv8,%function
.align 6
zfs_sha256_block_armv8:
hint #34 // bti c
#endif
.Lv8_entry:
stp x29,x30,[sp,#-16]!
add x29,sp,#0
Expand Down Expand Up @@ -1151,13 +1161,17 @@ zfs_sha256_block_armv8:

ldr x29,[sp],#16
ret
.size zfs_sha256_block_armv8,.-zfs_sha256_block_armv8
SET_SIZE(zfs_sha256_block_armv8)

#ifdef __APPLE__
ENTRY_ALIGN(zfs_sha256_block_neon, 16)
#else
.globl zfs_sha256_block_neon
.type zfs_sha256_block_neon,%function
.align 4
zfs_sha256_block_neon:
hint #34 // bti c
#endif
.Lneon_entry:
stp x29, x30, [sp, #-16]!
mov x29, sp
Expand Down Expand Up @@ -1997,6 +2011,6 @@ zfs_sha256_block_neon:
ldr x29,[x29]
add sp,sp,#16*4+16
ret
.size zfs_sha256_block_neon,.-zfs_sha256_block_neon
SET_SIZE(zfs_sha256_block_neon)

#endif
23 changes: 16 additions & 7 deletions module/icp/asm-aarch64/sha2/sha512-armv8.S
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,14 @@
* Portions Copyright (c) 2022 Tino Reichardt <[email protected]>
* - modified assembly to fit into OpenZFS
*/
#include <sys/asm_linkage.h>

#if defined(__aarch64__)

.text
SECTION_TEXT

.align 6
.type .LK512,%object
.balign 64
SET_OBJ(.LK512)
.LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
Expand Down Expand Up @@ -67,13 +68,17 @@
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.size .LK512,.-.LK512
SET_SIZE(.LK512)

#ifdef __APPLE__
ENTRY_ALIGN(zfs_sha512_block_armv7, 64)
#else
.globl zfs_sha512_block_armv7
.type zfs_sha512_block_armv7,%function
.align 6
zfs_sha512_block_armv7:
hint #34 // bti c
#endif
stp x29,x30,[sp,#-128]!
add x29,sp,#0

Expand Down Expand Up @@ -1034,14 +1039,18 @@ zfs_sha512_block_armv7:
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
ret
.size zfs_sha512_block_armv7,.-zfs_sha512_block_armv7
SET_SIZE(zfs_sha512_block_armv7)


#ifdef __APPLE__
ENTRY_ALIGN(zfs_sha512_block_armv8, 64)
#else
.globl zfs_sha512_block_armv8
.type zfs_sha512_block_armv8,%function
.align 6
zfs_sha512_block_armv8:
hint #34 // bti c
#endif
.Lv8_entry:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later
stp x29,x30,[sp,#-16]!
Expand All @@ -1063,7 +1072,7 @@ zfs_sha512_block_armv8:
rev64 v23.16b,v23.16b
b .Loop_hw

.align 4
.balign 16
.Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
Expand Down Expand Up @@ -1556,5 +1565,5 @@ zfs_sha512_block_armv8:

ldr x29,[sp],#16
ret
.size zfs_sha512_block_armv8,.-zfs_sha512_block_armv8
SET_SIZE(zfs_sha512_block_armv8)
#endif
2 changes: 1 addition & 1 deletion module/zcommon/zfs_fletcher.c
Original file line number Diff line number Diff line change
Expand Up @@ -949,7 +949,7 @@ fletcher_4_param(ZFS_MODULE_PARAM_ARGS)
{
int err = 0;

if (req->newptr == NULL) {
if ((void *)req->newptr == NULL) {
const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
const int init_buflen = 64;
const char *fmt;
Expand Down
4 changes: 3 additions & 1 deletion module/zfs/vdev_raidz_math.c
Original file line number Diff line number Diff line change
Expand Up @@ -634,14 +634,16 @@ vdev_raidz_impl_set(const char *val)
}

#if defined(_KERNEL)
#if defined(__linux__) || defined(__APPLE__)

#if defined(__linux__)
/*
 * Linux module-parameter setter for zfs_vdev_raidz_impl: forwards
 * the user-supplied implementation name to the generic
 * vdev_raidz_impl_set().  kp is unused.  Return value is whatever
 * vdev_raidz_impl_set() reports -- NOTE(review): its error
 * convention is defined elsewhere in this file, not visible here.
 */
static int
zfs_vdev_raidz_impl_set(const char *val, zfs_kernel_param_t *kp)
{
return (vdev_raidz_impl_set(val));
}
#endif

#if defined(__linux__) || defined(__APPLE__)
static int
zfs_vdev_raidz_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
Expand Down

0 comments on commit 802749c

Please sign in to comment.