scsi: ufs: core: Introduce a new clock_gating lock
Introduce a new clock gating lock to serialize access to some of the clock
gating members instead of the host_lock.

While at it, simplify the code with the guard() macro and friends for
automatic cleanup of the new lock. A few explicit
spin_lock_irqsave()/spin_unlock_irqrestore() pairs remain in ufshcd_hold(),
where the lock is dropped and re-acquired mid-function, a pattern that does
not map cleanly onto a guard scope.

Additionally, move the trace_ufshcd_clk_gating() call out of the region
protected by the lock, as it doesn't need protection.
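
For context, guard() and scoped_guard() come from the kernel's
<linux/cleanup.h> infrastructure and release the lock automatically on every
exit path. Below is a minimal sketch of the two flavors used in this patch,
assuming only stock <linux/spinlock.h>; the my_ctx structure and both helpers
are illustrative, not part of the patch:

#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t lock;
	int state;
};

/* guard(): lock held from here until every return of the function. */
static void set_state(struct my_ctx *c, int v)
{
	guard(spinlock_irqsave)(&c->lock);
	c->state = v;
	/* no explicit spin_unlock_irqrestore() needed */
}

/* scoped_guard(): lock held only for the braced block. */
static int check_busy(struct my_ctx *c)
{
	scoped_guard(spinlock_irqsave, &c->lock) {
		if (c->state)
			return -EBUSY;	/* unlocked before returning */
	}
	/* lock already dropped here */
	return 0;
}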

Signed-off-by: Avri Altman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
avri-altman-sndk authored and martinkpetersen committed Dec 4, 2024
1 parent 7869c65 commit 209f4e4
Showing 2 changed files with 59 additions and 59 deletions.
109 changes: 52 additions & 57 deletions drivers/ufs/core/ufshcd.c
@@ -1816,19 +1816,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
-	unsigned long flags;
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		if (hba->clk_gating.state == CLKS_ON)
+			return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
 
@@ -1863,7 +1860,7 @@ void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1879,11 +1876,11 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
@@ -1912,48 +1909,50 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 			__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
 
 static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * In case you are here to cancel this work the gating state
-	 * would be marked as REQ_CLKS_ON. In this case save time by
-	 * skipping the gating work and exit after changing the clock
-	 * state to CLKS_ON.
-	 */
-	if (hba->clk_gating.is_suspended ||
-	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
-		hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-		goto rel_lock;
-	}
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		/*
+		 * In case you are here to cancel this work the gating state
+		 * would be marked as REQ_CLKS_ON. In this case save time by
+		 * skipping the gating work and exit after changing the clock
+		 * state to CLKS_ON.
+		 */
+		if (hba->clk_gating.is_suspended ||
+		    hba->clk_gating.state != REQ_CLKS_OFF) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			return;
+		}
 
-	if (ufshcd_is_ufs_dev_busy(hba) ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    hba->clk_gating.active_reqs)
-		goto rel_lock;
+		if (hba->clk_gating.active_reqs)
+			return;
+	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_is_ufs_dev_busy(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1964,7 +1963,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 				__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
-			goto out;
+			return;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
@@ -1984,32 +1983,34 @@ static void ufshcd_gate_work(struct work_struct *work)
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return;
 }
 
-/* host lock must be held before calling this variant */
 static void __ufshcd_release(struct ufs_hba *hba)
 {
+	lockdep_assert_held(&hba->clk_gating.lock);
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
 	hba->clk_gating.active_reqs--;
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    ufshcd_has_pending_tasks(hba) || !hba->clk_gating.is_initialized ||
+	    !hba->clk_gating.is_initialized ||
 	    hba->clk_gating.state == CLKS_OFF)
 		return;
 
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_has_pending_tasks(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
+
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2019,11 +2020,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 void ufshcd_release(struct ufs_hba *hba)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
 
@@ -2038,11 +2036,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
 
@@ -2070,26 +2066,25 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 	u32 value;
 
 	if (kstrtou32(buf, 0, &value))
 		return -EINVAL;
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
 	if (value == hba->clk_gating.is_enabled)
-		goto out;
+		return count;
 
 	if (value)
 		__ufshcd_release(hba);
 	else
 		hba->clk_gating.active_reqs++;
 
 	hba->clk_gating.is_enabled = value;
-out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return count;
 }
 
@@ -2131,6 +2126,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
@@ -9126,7 +9123,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
-	unsigned long flags;
 	ktime_t start = ktime_get();
 	bool clk_state_changed = false;
 
@@ -9177,11 +9173,10 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.state = CLKS_ON;
+		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+			hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 
 	if (clk_state_changed)
9 changes: 7 additions & 2 deletions include/ufs/ufshcd.h
@@ -403,6 +403,9 @@ enum clk_gating_state {
  *	delay_ms
  * @ungate_work: worker to turn on clocks that will be used in case of
  *	interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ *	relative to the host lock
  * @state: the current clocks state
  * @delay_ms: gating delay in ms
  * @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -413,11 +416,14 @@ enum clk_gating_state {
  * @is_initialized: Indicates whether clock gating is initialized or not
  * @active_reqs: number of requests that are pending and should be waited for
  *	completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
  */
 struct ufs_clk_gating {
 	struct delayed_work gate_work;
 	struct work_struct ungate_work;
+	struct workqueue_struct *clk_gating_workq;
+
+	spinlock_t lock;
+
 	enum clk_gating_state state;
 	unsigned long delay_ms;
 	bool is_suspended;
@@ -426,7 +432,6 @@ enum clk_gating_state {
 	bool is_enabled;
 	bool is_initialized;
 	int active_reqs;
-	struct workqueue_struct *clk_gating_workq;
 };
 
 /**
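For illustration, a sketch of the lock nesting that the new @lock kerneldoc
describes: clk_gating.lock is the outer lock and the SCSI host_lock may only
be nested inside it, mirroring __ufshcd_release() in the diff above. The
can_gate() helper is hypothetical, not part of the patch:

/* Hypothetical helper; only the lock ordering is taken from the patch. */
static bool can_gate(struct ufs_hba *hba)
{
	/* outer: the new clk_gating.lock */
	guard(spinlock_irqsave)(&hba->clk_gating.lock);

	if (hba->clk_gating.active_reqs)
		return false;

	/* inner: host_lock nested inside clk_gating.lock */
	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
		if (ufshcd_has_pending_tasks(hba) ||
		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
			return false;
	}

	return true;
}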
