Skip to content

Commit

Permalink
Update the function with good practices
Browse files Browse the repository at this point in the history
Signed-off-by: Harshil Jani <[email protected]>
  • Loading branch information
Harshil-Jani committed Feb 27, 2023
1 parent 27674f4 commit 7bd9aba
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 24 deletions.
8 changes: 0 additions & 8 deletions examples/ecdh.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,6 @@

#include "random.h"

/* Zero out `size` bytes at `buf` through a volatile pointer so the
 * compiler is not allowed to elide the stores (plain memset before
 * free can be optimized away as a dead store). */
void secure_erase(void *buf, size_t size) {
    volatile unsigned char *p = (volatile unsigned char *)buf;
    size_t remaining = size;
    while (remaining > 0) {
        *p++ = 0;
        remaining--;
    }
}

int main(void) {
unsigned char seckey1[32];
unsigned char seckey2[32];
Expand Down
8 changes: 0 additions & 8 deletions examples/ecdsa.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,6 @@

#include "random.h"

/* Cleanse a buffer holding sensitive data. Every byte is written
 * through a volatile lvalue, which the compiler must not optimize out. */
void secure_erase(void *buf, size_t size) {
    volatile char *cur = (volatile char *)buf;
    volatile char *end = cur + size;
    while (cur != end) {
        *cur = 0;
        cur++;
    }
}

int main(void) {
/* Instead of signing the message directly, we must sign a 32-byte hash.
* Here the message is "Hello, world!" and the hash function was SHA-256.
Expand Down
29 changes: 29 additions & 0 deletions examples/random.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,3 +71,32 @@ static void print_hex(unsigned char* data, size_t size) {
}
printf("\n");
}

#if defined(_MSC_VER)
/* For SecureZeroMemory */
#include <Windows.h>
#endif
/* Cleanses memory to prevent leaking sensitive info. Won't be optimized out.
 *
 * ptr: start of the region holding sensitive data.
 * len: number of bytes to zero.
 */
static SECP256K1_INLINE void secure_erase(void *ptr, size_t len) {
#if defined(_MSC_VER)
    /* SecureZeroMemory is guaranteed not to be optimized out by MSVC. */
    SecureZeroMemory(ptr, len);
#elif defined(__GNUC__)
    /* We use a memory barrier that scares the compiler away from optimizing out the memset.
     *
     * Quoting Adam Langley <[email protected]> in commit ad1907fe73334d6c696c8539646c21b11178f20f
     * in BoringSSL (ISC License):
     *    As best as we can tell, this is sufficient to break any optimisations that
     *    might try to eliminate "superfluous" memsets.
     * This method is used in memzero_explicit() in the Linux kernel, too. Its advantage is that it
     * is pretty efficient, because the compiler can still implement the memset() efficiently,
     * just not remove it entirely. See "Dead Store Elimination (Still) Considered Harmful" by
     * Yang et al. (USENIX Security 2017) for more background.
     */
    memset(ptr, 0, len);
    __asm__ __volatile__("" : : "r"(ptr) : "memory");
#else
    /* Fallback: call memset through a volatile function pointer so the
     * compiler cannot prove which function runs and must keep the call. */
    void *(*volatile const volatile_memset)(void *, int, size_t) = memset;
    volatile_memset(ptr, 0, len);
#endif
}
8 changes: 0 additions & 8 deletions examples/schnorr.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,6 @@

#include "random.h"

/* Overwrite `size` bytes at `buf` with zeros. The volatile qualifier on
 * the destination forces the compiler to emit every store, so secrets
 * are reliably wiped even under optimization. */
void secure_erase(void *buf, size_t size) {
    volatile char *dst = (volatile char *)buf;
    size_t n = size;
    while (n--) {
        dst[n] = 0;
    }
}

int main(void) {
unsigned char msg[12] = "Hello World!";
unsigned char msg_hash[32];
Expand Down

0 comments on commit 7bd9aba

Please sign in to comment.