BACKPORT: ACPI / APEI: Move locking to the notification helper
ghes_copy_tofrom_phys() takes different locks depending on in_nmi().
This doesn't work if there are multiple NMI-like notifications that
can interrupt each other.

Now that NOTIFY_SEA is always called in the same context, move the
lock-taking to the notification helper. The helper will always know
which lock to take. This avoids ghes_copy_tofrom_phys() taking a guess
based on in_nmi().
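
As a condensed illustration (copied from the ghes_notify_sea() hunk in the
diff below, with comments added; not a standalone compilable unit), an
NMI-like notification helper now owns its own raw spinlock and takes it
around the estatus spooling call:

int ghes_notify_sea(void)
{
	/* Serialises NOTIFY_SEA against itself; other notification types use their own lock. */
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
	int rv;

	raw_spin_lock(&ghes_notify_lock_sea);
	rv = ghes_in_nmi_spool_from_list(&ghes_sea);	/* read and queue records from the SEA sources */
	raw_spin_unlock(&ghes_notify_lock_sea);

	return rv;
}

The lock is raw and taken without irqsave because, as described above, this
helper is always entered in the same NMI-like context.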

This splits NOTIFY_NMI and NOTIFY_SEA to use different locks. All
the other notifications use ghes_proc(), and are called in process
or IRQ context. Move the spin_lock_irqsave() around their ghes_proc()
calls.
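
Sketched for reference (copied from the ghes_poll_func() hunk in the diff
below, with a comment added; not a standalone compilable unit), a
process/IRQ-context caller now looks like this:

static void ghes_poll_func(struct timer_list *t)
{
	struct ghes *ghes = from_timer(ghes, t, timer);
	unsigned long flags;

	/* All process/IRQ-context callers of ghes_proc() share ghes_notify_lock_irq. */
	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}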

This patch is needed because Quicksilver firmware-first error handling
uses the SDEI notification type for communication between trusted
firmware and the OS. It adds the NMI and SDEI functionality needed so
that the kernel's SDEI path through APEI is treated as an NMI-like
notification and is properly wired up to the APEI interfaces.

Backported from: torvalds/linux@3b880cb

Signed-off-by: James Morse <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Signed-off-by: Tyler Baicar <[email protected]>
James Morse authored and tphan-ampere committed Apr 21, 2020
1 parent ab77d4b commit 2ff3da1
Showing 1 changed file with 25 additions and 9 deletions.
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -114,11 +114,10 @@ static DEFINE_MUTEX(ghes_list_mutex);
  * handler, but general ioremap can not be used in atomic context, so
  * the fixmap is used instead.
  *
- * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * This spinlock is used to prevent the fixmap entry from being used
  * simultaneously.
  */
-static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
-static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+static DEFINE_SPINLOCK(ghes_notify_lock_irq);
 
 static struct gen_pool *ghes_estatus_pool;
 static unsigned long ghes_estatus_pool_size_request;
@@ -287,18 +286,15 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 				  int from_phys)
 {
 	void __iomem *vaddr;
-	unsigned long flags = 0;
 	int in_nmi = in_nmi();
 	u64 offset;
 	u32 trunk;
 
 	while (len > 0) {
 		offset = paddr - (paddr & PAGE_MASK);
 		if (in_nmi) {
-			raw_spin_lock(&ghes_ioremap_lock_nmi);
 			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
 		} else {
-			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
 			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
 		}
 		trunk = PAGE_SIZE - offset;
@@ -312,10 +308,8 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 		buffer += trunk;
 		if (in_nmi) {
 			ghes_iounmap_nmi();
-			raw_spin_unlock(&ghes_ioremap_lock_nmi);
 		} else {
 			ghes_iounmap_irq();
-			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
 		}
 	}
 }
@@ -729,18 +723,24 @@ static void ghes_add_timer(struct ghes *ghes)
 static void ghes_poll_func(struct timer_list *t)
 {
 	struct ghes *ghes = from_timer(ghes, t, timer);
+	unsigned long flags;
 
+	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 	ghes_proc(ghes);
+	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 	if (!(ghes->flags & GHES_EXITING))
 		ghes_add_timer(ghes);
 }
 
 static irqreturn_t ghes_irq_func(int irq, void *data)
 {
 	struct ghes *ghes = data;
+	unsigned long flags;
 	int rc;
 
+	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 	rc = ghes_proc(ghes);
+	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 	if (rc)
 		return IRQ_NONE;
 
@@ -751,14 +751,17 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
 			   void *data)
 {
 	struct ghes *ghes;
+	unsigned long flags;
 	int ret = NOTIFY_DONE;
 
+	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 	rcu_read_lock();
 	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
 		if (!ghes_proc(ghes))
 			ret = NOTIFY_OK;
 	}
 	rcu_read_unlock();
+	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 
 	return ret;
 }
@@ -913,7 +916,14 @@ static LIST_HEAD(ghes_sea);
  */
 int ghes_notify_sea(void)
 {
-	return ghes_in_nmi_spool_from_list(&ghes_sea);
+	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
+	int rv;
+
+	raw_spin_lock(&ghes_notify_lock_sea);
+	rv = ghes_in_nmi_spool_from_list(&ghes_sea);
+	raw_spin_unlock(&ghes_notify_lock_sea);
+
+	return rv;
 }
 
 static void ghes_sea_add(struct ghes *ghes)
@@ -946,14 +956,17 @@ static LIST_HEAD(ghes_nmi);
 
 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
+	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
 	int err, ret = NMI_DONE;
 
 	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
 		return ret;
 
+	raw_spin_lock(&ghes_notify_lock_nmi);
 	err = ghes_in_nmi_spool_from_list(&ghes_nmi);
 	if (!err)
 		ret = NMI_HANDLED;
+	raw_spin_unlock(&ghes_notify_lock_nmi);
 
 	atomic_dec(&ghes_in_nmi);
 	return ret;
@@ -995,6 +1008,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
 {
 	struct acpi_hest_generic *generic;
 	struct ghes *ghes = NULL;
+	unsigned long flags;
 
 	int rc = -EINVAL;
 
@@ -1097,7 +1111,9 @@ static int ghes_probe(struct platform_device *ghes_dev)
 	ghes_edac_register(ghes, &ghes_dev->dev);
 
 	/* Handle any pending errors right away */
+	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 	ghes_proc(ghes);
+	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 
 	return 0;
 
