From 03e2a1209a83a380df34a72f7d6d1bc6c74132c7 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 6 Sep 2024 18:22:22 +0200 Subject: [PATCH 001/374] libfs: fix get_stashed_dentry() commit 4e32c25b58b945f976435bbe51f39b32d714052e upstream. get_stashed_dentry() tries to optimistically retrieve a stashed dentry from a provided location. It needs to ensure to hold rcu lock before it dereference the stashed location to prevent UAF issues. Use rcu_dereference() instead of READ_ONCE() it's effectively equivalent with some lockdep bells and whistles and it communicates clearly that this expects rcu protection. Link: https://lore.kernel.org/r/20240906-vfs-hotfix-5959800ffa68@brauner Fixes: 07fd7c329839 ("libfs: add path_from_stashed()") Reported-by: syzbot+f82b36bffae7ef78b6a7@syzkaller.appspotmail.com Fixes: syzbot+f82b36bffae7ef78b6a7@syzkaller.appspotmail.com Reported-by: syzbot+cbe4b96e1194b0e34db6@syzkaller.appspotmail.com Fixes: syzbot+cbe4b96e1194b0e34db6@syzkaller.appspotmail.com Signed-off-by: Christian Brauner Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/libfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/libfs.c b/fs/libfs.c index 65279e53fbf2..bb7d6e523ee3 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -2043,12 +2043,12 @@ struct timespec64 simple_inode_init_ts(struct inode *inode) } EXPORT_SYMBOL(simple_inode_init_ts); -static inline struct dentry *get_stashed_dentry(struct dentry *stashed) +static inline struct dentry *get_stashed_dentry(struct dentry **stashed) { struct dentry *dentry; guard(rcu)(); - dentry = READ_ONCE(stashed); + dentry = rcu_dereference(*stashed); if (!dentry) return NULL; if (!lockref_get_not_dead(&dentry->d_lockref)) @@ -2145,7 +2145,7 @@ int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data, const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info; /* See if dentry can be reused. */ - path->dentry = get_stashed_dentry(*stashed); + path->dentry = get_stashed_dentry(stashed); if (path->dentry) { sops->put_data(data); goto out_path; From 32008ab989ddcff1a485fa2b4906234c25dc5cd6 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sun, 1 Sep 2024 11:16:07 -0700 Subject: [PATCH 002/374] sch/netem: fix use after free in netem_dequeue commit 3b3a2a9c6349e25a025d2330f479bc33a6ccb54a upstream. If netem_dequeue() enqueues packet to inner qdisc and that qdisc returns __NET_XMIT_STOLEN. 
The packet is dropped but qdisc_tree_reduce_backlog() is not called to update the parent's q.qlen, leading to the similar use-after-free as Commit e04991a48dbaf382 ("netem: fix return value if duplicate enqueue fails") Commands to trigger KASAN UaF: ip link add type dummy ip link set lo up ip link set dummy0 up tc qdisc add dev lo parent root handle 1: drr tc filter add dev lo parent 1: basic classid 1:1 tc class add dev lo classid 1:1 drr tc qdisc add dev lo parent 1:1 handle 2: netem tc qdisc add dev lo parent 2: handle 3: drr tc filter add dev lo parent 3: basic classid 3:1 action mirred egress redirect dev dummy0 tc class add dev lo classid 3:1 drr ping -c1 -W0.01 localhost # Trigger bug tc class del dev lo classid 1:1 tc class add dev lo classid 1:1 drr ping -c1 -W0.01 localhost # UaF Fixes: 50612537e9ab ("netem: fix classful handling") Reported-by: Budimir Markovic Signed-off-by: Stephen Hemminger Link: https://patch.msgid.link/20240901182438.4992-1-stephen@networkplumber.org Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_netem.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0f8d581438c3..39382ee1e331 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -742,11 +742,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) err = qdisc_enqueue(skb, q->qdisc, &to_free); kfree_skb_list(to_free); - if (err != NET_XMIT_SUCCESS && - net_xmit_drop_count(err)) { - qdisc_qstats_drop(sch); - qdisc_tree_reduce_backlog(sch, 1, - pkt_len); + if (err != NET_XMIT_SUCCESS) { + if (net_xmit_drop_count(err)) + qdisc_qstats_drop(sch); + qdisc_tree_reduce_backlog(sch, 1, pkt_len); } goto tfifo_dequeue; } From 590768e13dddb65d96c11da8da664ce55c5a2eb9 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 22 Aug 2024 16:59:33 -0700 Subject: [PATCH 003/374] xfs: xfs_finobt_count_blocks() walks the wrong btree commit 95179935beadccaf0f0bb461adb778731e293da4 upstream. As a result of the factoring in commit 14dd46cf31f4 ("xfs: split xfs_inobt_init_cursor"), mount started taking a long time on a user's filesystem. For Anders, this made mount times regress from under a second to over 15 minutes for a filesystem with only 30 million inodes in it. Anders bisected it down to the above commit, but even then the bug was not obvious. In this commit, over 20 calls to xfs_inobt_init_cursor() were modified, and some we modified to call a new function named xfs_finobt_init_cursor(). If that takes you a moment to reread those function names to see what the rename was, then you have realised why this bug wasn't spotted during review. And it wasn't spotted on inspection even after the bisect pointed at this commit - a single missing "f" isn't the easiest thing for a human eye to notice.... The result is that xfs_finobt_count_blocks() now incorrectly calls xfs_inobt_init_cursor() so it is now walking the inobt instead of the finobt. Hence when there are lots of allocated inodes in a filesystem, mount takes a -long- time run because it now walks a massive allocated inode btrees instead of the small, nearly empty free inode btrees. It also means all the finobt space reservations are wrong, so mount could potentially given ENOSPC on kernel upgrade. In hindsight, commit 14dd46cf31f4 should have been two commits - the first to convert the finobt callers to the new API, the second to modify the xfs_inobt_init_cursor() API for the inobt callers. That would have made the bug very obvious during review. 
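As an aside, the fixed call sequence is tiny; an illustrative, simplified excerpt of xfs_finobt_count_blocks() after this change (taken from the hunk below, comments added here) shows that the only difference is the cursor constructor:

        cur = xfs_finobt_init_cursor(pag, tp, agbp);    /* free inode btree: small, nearly empty */
        /* was: xfs_inobt_init_cursor(pag, tp, agbp)       allocated inode btree: huge on a busy fs */
        error = xfs_btree_count_blocks(cur, tree_blocks);
        xfs_btree_del_cursor(cur, error);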
Fixes: 14dd46cf31f4 ("xfs: split xfs_inobt_init_cursor") Reported-by: Anders Blomdell Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Chandan Babu R Signed-off-by: Greg Kroah-Hartman --- fs/xfs/libxfs/xfs_ialloc_btree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 42e9fd47f6c7..ffef1c75dd57 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -749,7 +749,7 @@ xfs_finobt_count_blocks( if (error) return error; - cur = xfs_inobt_init_cursor(pag, tp, agbp); + cur = xfs_finobt_init_cursor(pag, tp, agbp); error = xfs_btree_count_blocks(cur, tree_blocks); xfs_btree_del_cursor(cur, error); xfs_trans_brelse(tp, agbp); From 2e7189d2b1de51fc2567676cd4f96c0fe0960b9f Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 29 Aug 2024 15:03:20 +0300 Subject: [PATCH 004/374] net: ethernet: ti: am65-cpsw: Fix NULL dereference on XDP_TX commit 0a50c35277f96481a5a6ed5faf347f282040c57d upstream. If number of TX queues are set to 1 we get a NULL pointer dereference during XDP_TX. ~# ethtool -L eth0 tx 1 ~# ./xdp-trafficgen udp -A -a eth0 -t 2 Transmitting on eth0 (ifindex 2) [ 241.135257] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000030 Fix this by using actual TX queues instead of max TX queues when picking the TX channel in am65_cpsw_ndo_xdp_xmit(). Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support") Signed-off-by: Roger Quadros Reviewed-by: Jacob Keller Acked-by: Julien Panis Reviewed-by: MD Danish Anwar Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 4e50b3792888..af5be6191c20 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1918,12 +1918,13 @@ static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, u32 flags) { + struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; int cpu = smp_processor_id(); int i, nxmit = 0; - tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES]; + tx_chn = &common->tx_chns[cpu % common->tx_ch_num]; netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); __netif_tx_lock(netif_txq, cpu); From f7fe95f40c85311c98913fe6ae2c56adb7f767a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jens=20Emil=20Schulz=20=C3=98stergaard?= Date: Thu, 29 Aug 2024 11:52:54 +0200 Subject: [PATCH 005/374] net: microchip: vcap: Fix use-after-free error in kunit test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a3c1e45156ad39f225cd7ddae0f81230a3b1e657 upstream. This is a clear use-after-free error. We remove it, and rely on checking the return code of vcap_del_rule. Reported-by: Dan Carpenter Closes: https://lore.kernel.org/kernel-janitors/7bffefc6-219a-4f71-baa0-ad4526e5c198@kili.mountain/ Fixes: c956b9b318d9 ("net: microchip: sparx5: Adding KUNIT tests of key/action values in VCAP API") Signed-off-by: Jens Emil Schulz Østergaard Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/microchip/vcap/vcap_api_kunit.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index fe4e166de8a0..79276bc3d495 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -1442,18 +1442,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0, rule->cookie, false); - vcap_free_rule(rule); - - /* Check that the rule has been freed: tricky to access since this - * memory should not be accessible anymore - */ - KUNIT_EXPECT_PTR_NE(test, NULL, rule); - ret = list_empty(&rule->keyfields); - KUNIT_EXPECT_EQ(test, true, ret); - ret = list_empty(&rule->actionfields); - KUNIT_EXPECT_EQ(test, true, ret); - - vcap_del_rule(&test_vctrl, &test_netdev, id); + ret = vcap_del_rule(&test_vctrl, &test_netdev, id); + KUNIT_EXPECT_EQ(test, 0, ret); } static void vcap_api_set_rule_counter_test(struct kunit *test) From 396c88ab6dab6eae2cb34b4ba3634d17f9626c83 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 29 Aug 2024 15:03:19 +0300 Subject: [PATCH 006/374] net: ethernet: ti: am65-cpsw: fix XDP_DROP, XDP_TX and XDP_REDIRECT commit 5e24db550bd6f484d2c7687ee488708260e1f84a upstream. The following XDP_DROP test from [1] stalls the interface after 250 packets. ~# xdb-bench drop -m native eth0 This is because new RX requests are never queued. Fix that. The below XDP_TX test from [1] fails with a warning [ 499.947381] XDP_WARN: xdp_update_frame_from_buff(line:277): Driver BUG: missing reserved tailroom ~# xdb-bench tx -m native eth0 Fix that by using PAGE_SIZE during xdp_init_buf(). In XDP_REDIRECT case only 1 packet was processed in rx_poll. Fix it to process up to budget packets. Fix all XDP error cases to call trace_xdp_exception() and drop the packet in am65_cpsw_run_xdp(). 
[1] xdp-tools suite https://github.com/xdp-project/xdp-tools Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support") Signed-off-by: Roger Quadros Reviewed-by: Jacob Keller Acked-by: Julien Panis Reviewed-by: MD Danish Anwar Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 62 +++++++++++++----------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index af5be6191c20..902b22de61d1 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -156,12 +156,13 @@ #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7 /* XDP */ -#define AM65_CPSW_XDP_CONSUMED 2 -#define AM65_CPSW_XDP_REDIRECT 1 +#define AM65_CPSW_XDP_CONSUMED BIT(1) +#define AM65_CPSW_XDP_REDIRECT BIT(0) #define AM65_CPSW_XDP_PASS 0 /* Include headroom compatible with both skb and xdpf */ -#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) +#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) +#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long)) static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave, const u8 *dev_addr) @@ -933,7 +934,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); if (unlikely(!host_desc)) { ndev->stats.tx_dropped++; - return -ENOMEM; + return AM65_CPSW_XDP_CONSUMED; /* drop */ } am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type); @@ -942,7 +943,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, pkt_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) { ndev->stats.tx_dropped++; - ret = -ENOMEM; + ret = AM65_CPSW_XDP_CONSUMED; /* drop */ goto pool_free; } @@ -977,6 +978,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, /* Inform BQL */ netdev_tx_completed_queue(netif_txq, 1, pkt_len); ndev->stats.tx_errors++; + ret = AM65_CPSW_XDP_CONSUMED; /* drop */ goto dma_unmap; } @@ -1004,6 +1006,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, struct bpf_prog *prog; struct page *page; u32 act; + int err; prog = READ_ONCE(port->xdp_prog); if (!prog) @@ -1023,14 +1026,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) - break; + goto drop; __netif_tx_lock(netif_txq, cpu); - ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf, + err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf, AM65_CPSW_TX_BUF_TYPE_XDP_TX); __netif_tx_unlock(netif_txq); - if (ret) - break; + if (err) + goto drop; ndev->stats.rx_bytes += *len; ndev->stats.rx_packets++; @@ -1038,7 +1041,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, goto out; case XDP_REDIRECT: if (unlikely(xdp_do_redirect(ndev, xdp, prog))) - break; + goto drop; ndev->stats.rx_bytes += *len; ndev->stats.rx_packets++; @@ -1048,6 +1051,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, bpf_warn_invalid_xdp_action(ndev, prog, act); fallthrough; case XDP_ABORTED: +drop: trace_xdp_exception(ndev, prog, act); fallthrough; case XDP_DROP: @@ -1056,7 +1060,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, page = virt_to_head_page(xdp->data); am65_cpsw_put_page(rx_chn, page, true, desc_idx); - out: return ret; } @@ -1095,7 +1098,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info) } static int 
am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, - u32 flow_idx, int cpu) + u32 flow_idx, int cpu, int *xdp_state) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; u32 buf_dma_len, pkt_len, port_id = 0, csum_info; @@ -1114,6 +1117,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, void **swdata; u32 *psdata; + *xdp_state = AM65_CPSW_XDP_PASS; ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma); if (ret) { if (ret != -ENODATA) @@ -1161,15 +1165,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, } if (port->xdp_prog) { - xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq); - - xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb), + xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq); + xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, pkt_len, false); - - ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx, - cpu, &pkt_len); - if (ret != AM65_CPSW_XDP_PASS) - return ret; + *xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx, + cpu, &pkt_len); + if (*xdp_state != AM65_CPSW_XDP_PASS) + goto allocate; /* Compute additional headroom to be reserved */ headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb); @@ -1193,9 +1195,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, stats->rx_bytes += pkt_len; u64_stats_update_end(&stats->syncp); +allocate: new_page = page_pool_dev_alloc_pages(rx_chn->page_pool); - if (unlikely(!new_page)) + if (unlikely(!new_page)) { + dev_err(dev, "page alloc failed\n"); return -ENOMEM; + } + rx_chn->pages[desc_idx] = new_page; if (netif_dormant(ndev)) { @@ -1229,8 +1235,9 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx); int flow = AM65_CPSW_MAX_RX_FLOWS; int cpu = smp_processor_id(); - bool xdp_redirect = false; + int xdp_state_or = 0; int cur_budget, ret; + int xdp_state; int num_rx = 0; /* process every flow */ @@ -1238,12 +1245,11 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) cur_budget = budget - num_rx; while (cur_budget--) { - ret = am65_cpsw_nuss_rx_packets(common, flow, cpu); - if (ret) { - if (ret == AM65_CPSW_XDP_REDIRECT) - xdp_redirect = true; + ret = am65_cpsw_nuss_rx_packets(common, flow, cpu, + &xdp_state); + xdp_state_or |= xdp_state; + if (ret) break; - } num_rx++; } @@ -1251,7 +1257,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) break; } - if (xdp_redirect) + if (xdp_state_or & AM65_CPSW_XDP_REDIRECT) xdp_do_flush(); dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); From 6a14fad8be178df6c4589667efec1789a3307b4e Mon Sep 17 00:00:00 2001 From: robelin Date: Fri, 23 Aug 2024 14:43:41 +0000 Subject: [PATCH 007/374] ASoC: dapm: Fix UAF for snd_soc_pcm_runtime object commit b4a90b543d9f62d3ac34ec1ab97fc5334b048565 upstream. 
When using kernel with the following extra config, - CONFIG_KASAN=y - CONFIG_KASAN_GENERIC=y - CONFIG_KASAN_INLINE=y - CONFIG_KASAN_VMALLOC=y - CONFIG_FRAME_WARN=4096 kernel detects that snd_pcm_suspend_all() access a freed 'snd_soc_pcm_runtime' object when the system is suspended, which leads to a use-after-free bug: [ 52.047746] BUG: KASAN: use-after-free in snd_pcm_suspend_all+0x1a8/0x270 [ 52.047765] Read of size 1 at addr ffff0000b9434d50 by task systemd-sleep/2330 [ 52.047785] Call trace: [ 52.047787] dump_backtrace+0x0/0x3c0 [ 52.047794] show_stack+0x34/0x50 [ 52.047797] dump_stack_lvl+0x68/0x8c [ 52.047802] print_address_description.constprop.0+0x74/0x2c0 [ 52.047809] kasan_report+0x210/0x230 [ 52.047815] __asan_report_load1_noabort+0x3c/0x50 [ 52.047820] snd_pcm_suspend_all+0x1a8/0x270 [ 52.047824] snd_soc_suspend+0x19c/0x4e0 The snd_pcm_sync_stop() has a NULL check on 'substream->runtime' before making any access. So we need to always set 'substream->runtime' to NULL everytime we kfree() it. Fixes: a72706ed8208 ("ASoC: codec2codec: remove ephemeral variables") Signed-off-by: robelin Signed-off-by: Sameer Pujar Link: https://patch.msgid.link/20240823144342.4123814-2-spujar@nvidia.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/soc-dapm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 16dad4a45443..1680c2498fe9 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -4066,6 +4066,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, case SND_SOC_DAPM_POST_PMD: kfree(substream->runtime); + substream->runtime = NULL; break; default: From ecdbe8ac86fb5538ccc623a41f88ec96c7168ab9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 23 Jul 2024 16:20:55 -0700 Subject: [PATCH 008/374] KVM: x86: Acquire kvm->srcu when handling KVM_SET_VCPU_EVENTS commit 4bcdd831d9d01e0fb64faea50732b59b2ee88da1 upstream. Grab kvm->srcu when processing KVM_SET_VCPU_EVENTS, as KVM will forcibly leave nested VMX/SVM if SMM mode is being toggled, and leaving nested VMX reads guest memory. Note, kvm_vcpu_ioctl_x86_set_vcpu_events() can also be called from KVM_RUN via sync_regs(), which already holds SRCU. I.e. trying to precisely use kvm_vcpu_srcu_read_lock() around the problematic SMM code would cause problems. Acquiring SRCU isn't all that expensive, so for simplicity, grab it unconditionally for KVM_SET_VCPU_EVENTS. ============================= WARNING: suspicious RCU usage 6.10.0-rc7-332d2c1d713e-next-vm #552 Not tainted ----------------------------- include/linux/kvm_host.h:1027 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by repro/1071: #0: ffff88811e424430 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x7d/0x970 [kvm] stack backtrace: CPU: 15 PID: 1071 Comm: repro Not tainted 6.10.0-rc7-332d2c1d713e-next-vm #552 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 Call Trace: dump_stack_lvl+0x7f/0x90 lockdep_rcu_suspicious+0x13f/0x1a0 kvm_vcpu_gfn_to_memslot+0x168/0x190 [kvm] kvm_vcpu_read_guest+0x3e/0x90 [kvm] nested_vmx_load_msr+0x6b/0x1d0 [kvm_intel] load_vmcs12_host_state+0x432/0xb40 [kvm_intel] vmx_leave_nested+0x30/0x40 [kvm_intel] kvm_vcpu_ioctl_x86_set_vcpu_events+0x15d/0x2b0 [kvm] kvm_arch_vcpu_ioctl+0x1107/0x1750 [kvm] ? mark_held_locks+0x49/0x70 ? kvm_vcpu_ioctl+0x7d/0x970 [kvm] ? kvm_vcpu_ioctl+0x497/0x970 [kvm] kvm_vcpu_ioctl+0x497/0x970 [kvm] ? 
lock_acquire+0xba/0x2d0 ? find_held_lock+0x2b/0x80 ? do_user_addr_fault+0x40c/0x6f0 ? lock_release+0xb7/0x270 __x64_sys_ioctl+0x82/0xb0 do_syscall_64+0x6c/0x170 entry_SYSCALL_64_after_hwframe+0x4b/0x53 RIP: 0033:0x7ff11eb1b539 Fixes: f7e570780efc ("KVM: x86: Forcibly leave nested virt when SMM state is toggled") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240723232055.3643811-1-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0b7adf3bc58a..d8fc894706ef 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6040,7 +6040,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; + kvm_vcpu_srcu_read_lock(vcpu); r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); + kvm_vcpu_srcu_read_unlock(vcpu); break; } case KVM_GET_DEBUGREGS: { From 67766d01014ebf9f71868aa5e00d7fb6e5ad1b93 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Fri, 2 Aug 2024 18:16:08 +0300 Subject: [PATCH 009/374] KVM: SVM: fix emulation of msr reads/writes of MSR_FS_BASE and MSR_GS_BASE commit dad1613e0533b380318281c1519e1a3477c2d0d2 upstream. If these msrs are read by the emulator (e.g due to 'force emulation' prefix), SVM code currently fails to extract the corresponding segment bases, and return them to the emulator. Fix that. Cc: stable@vger.kernel.org Signed-off-by: Maxim Levitsky Link: https://lore.kernel.org/r/20240802151608.72896-3-mlevitsk@redhat.com Signed-off-by: Sean Christopherson Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm/svm.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c95d3900fe56..225a40ccf363 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2863,6 +2863,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_CSTAR: msr_info->data = svm->vmcb01.ptr->save.cstar; break; + case MSR_GS_BASE: + msr_info->data = svm->vmcb01.ptr->save.gs.base; + break; + case MSR_FS_BASE: + msr_info->data = svm->vmcb01.ptr->save.fs.base; + break; case MSR_KERNEL_GS_BASE: msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; break; @@ -3088,6 +3094,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_CSTAR: svm->vmcb01.ptr->save.cstar = data; break; + case MSR_GS_BASE: + svm->vmcb01.ptr->save.gs.base = data; + break; + case MSR_FS_BASE: + svm->vmcb01.ptr->save.fs.base = data; + break; case MSR_KERNEL_GS_BASE: svm->vmcb01.ptr->save.kernel_gs_base = data; break; From 85e5f2451853c14429347fc3f7e46ff7850b5b0b Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 8 Aug 2024 06:29:36 +0000 Subject: [PATCH 010/374] KVM: SVM: Don't advertise Bus Lock Detect to guest if SVM support is missing commit 54950bfe2b69cdc06ef753872b5225e54eb73506 upstream. If host supports Bus Lock Detect, KVM advertises it to guests even if SVM support is absent. Additionally, guest wouldn't be able to use it despite guest CPUID bit being set. Fix it by unconditionally clearing the feature bit in KVM cpu capability. 
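As an illustrative sketch (the exact hunk is below), the fix simply masks the bit out of the capabilities KVM advertises, at the end of svm_set_cpu_caps(), so the feature never reaches guest CPUID on AMD hosts regardless of what the host CPU reports:

        /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
        kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);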
Reported-by: Jim Mattson Closes: https://lore.kernel.org/r/CALMp9eRet6+v8Y1Q-i6mqPm4hUow_kJNhmVHfOV8tMfuSS=tVg@mail.gmail.com Fixes: 76ea438b4afc ("KVM: X86: Expose bus lock debug exception to guest") Cc: stable@vger.kernel.org Signed-off-by: Ravi Bangoria Reviewed-by: Jim Mattson Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/r/20240808062937.1149-4-ravi.bangoria@amd.com Signed-off-by: Sean Christopherson Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm/svm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 225a40ccf363..0357f7af5596 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5223,6 +5223,9 @@ static __init void svm_set_cpu_caps(void) /* CPUID 0x8000001F (SME/SEV features) */ sev_set_cpu_caps(); + + /* Don't advertise Bus Lock Detect to guest if SVM support is absent */ + kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); } static __init int svm_hardware_setup(void) From 54e3f7d0a17f5c11a4fa76cc502e12c2fcded623 Mon Sep 17 00:00:00 2001 From: Christoffer Sandberg Date: Tue, 27 Aug 2024 12:25:40 +0200 Subject: [PATCH 011/374] ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices commit 4178d78cd7a86510ba68d203f26fc01113c7f126 upstream. The Sirius notebooks have two sets of speakers 0x17 (sides) and 0x1d (top center). The side speakers are active by default but the top speakers aren't. This patch provides a pincfg quirk to activate the top speakers. Signed-off-by: Christoffer Sandberg Signed-off-by: Werner Sembach Cc: stable@vger.kernel.org Link: https://patch.msgid.link/20240827102540.9480-1-wse@tuxedocomputers.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_conexant.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index f030669243f9..e851785ff058 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -307,6 +307,7 @@ enum { CXT_FIXUP_HEADSET_MIC, CXT_FIXUP_HP_MIC_NO_PRESENCE, CXT_PINCFG_SWS_JS201D, + CXT_PINCFG_TOP_SPEAKER, }; /* for hda_fixup_thinkpad_acpi() */ @@ -974,6 +975,13 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_PINS, .v.pins = cxt_pincfg_sws_js201d, }, + [CXT_PINCFG_TOP_SPEAKER] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1d, 0x82170111 }, + { } + }, + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -1070,6 +1078,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205), + SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER), + SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER), {} }; @@ -1089,6 +1099,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" }, { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" }, + { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" }, {} }; From e083e14b1344cf52751c1700037023e95b6757ba Mon Sep 17 00:00:00 2001 From: Terry Cheong Date: Fri, 30 Aug 2024 04:11:53 +0800 Subject: [PATCH 012/374] ALSA: hda/realtek: add patch for internal mic in Lenovo V145 commit ef27e89e7f3015be2b3c124833fbd6d2e4686561 upstream. 
Lenovo V145 is having phase inverted dmic but simply applying inverted dmic fixups does not work. Chaining up verb fixes for ALC283 enables inverting dmic fixup to work properly. Signed-off-by: Terry Cheong Cc: Link: https://patch.msgid.link/20240830-lenovo-v145-fixes-v3-1-f7b7265068fa@chromium.org Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1a7b7e790fca..2708ae289380 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7428,6 +7428,7 @@ enum { ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + ALC236_FIXUP_LENOVO_INV_DMIC, ALC298_FIXUP_SAMSUNG_AMP, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, @@ -9049,6 +9050,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc236_fixup_hp_mute_led_micmute_vref, }, + [ALC236_FIXUP_LENOVO_INV_DMIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_inv_dmic, + .chained = true, + .chain_id = ALC283_FIXUP_INT_MIC, + }, [ALC298_FIXUP_SAMSUNG_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_samsung_amp, @@ -10609,6 +10616,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), @@ -10860,6 +10868,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"}, {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"}, {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"}, + {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"}, {} }; #define ALC225_STANDARD_PINS \ From deb8e5a399e941c9c7a7ce599ae67c068fffe27c Mon Sep 17 00:00:00 2001 From: Adam Queler Date: Tue, 3 Sep 2024 16:24:19 -0400 Subject: [PATCH 013/374] ALSA: hda/realtek: Enable Mute Led for HP Victus 15-fb1xxx commit b474f60f6a0c90f560190ac2cc6f20805f35d2c1 upstream. The mute led is controlled by ALC245. This patch enables the already existing quirk for this device. 
Signed-off-by: Adam Queler Cc: Link: https://patch.msgid.link/20240903202419.31433-1-queler+k@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2708ae289380..2c06b6c3179e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10229,6 +10229,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), + SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), From 1a892e42d4cc4655410e90a6ae0ca05a124ef52b Mon Sep 17 00:00:00 2001 From: Vasiliy Kovalev Date: Thu, 5 Sep 2024 17:02:11 +0300 Subject: [PATCH 014/374] ALSA: hda/realtek - Fix inactive headset mic jack for ASUS Vivobook 15 X1504VAP commit a83e4c97ddd7473406ec5e1df8d5e7b24bd7e892 upstream. When the headset is connected, there is no automatic switching of the capture source - you can only manually select the headset microphone in pavucontrol. This patch fixes/activates the inactive microphone of the headset. Signed-off-by: Vasiliy Kovalev Cc: Link: https://patch.msgid.link/20240905140211.937385-1-kovalev@altlinux.org Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2c06b6c3179e..6fb2e56922e0 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10356,6 +10356,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_CS35L56_I2C_2), From a6370f7196b630a1e596058634fa9105b0aa5733 Mon Sep 17 00:00:00 2001 From: Maximilien Perreault Date: Tue, 3 Sep 2024 20:10:13 -0700 Subject: [PATCH 015/374] ALSA: hda/realtek: Support mute LED on HP Laptop 14-dq2xxx commit 47a9e8dbb8d4713a9aac7cc6ce3c82dcc94217d8 upstream. The mute LED on this HP laptop uses ALC236 and requires a quirk to function. This patch enables the existing quirk for the device. 
Signed-off-by: Maximilien Perreault Cc: Link: https://patch.msgid.link/20240904031013.21220-1-maximilienperreault@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6fb2e56922e0..1aeafbedb357 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10105,6 +10105,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), + SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), From f06af737e4be28c0e926dc25d5f0a111da4e2987 Mon Sep 17 00:00:00 2001 From: "Nysal Jan K.A." Date: Thu, 29 Aug 2024 07:58:27 +0530 Subject: [PATCH 016/374] powerpc/qspinlock: Fix deadlock in MCS queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 734ad0af3609464f8f93e00b6c0de1e112f44559 upstream. If an interrupt occurs in queued_spin_lock_slowpath() after we increment qnodesp->count and before node->lock is initialized, another CPU might see stale lock values in get_tail_qnode(). If the stale lock value happens to match the lock on that CPU, then we write to the "next" pointer of the wrong qnode. This causes a deadlock as the former CPU, once it becomes the head of the MCS queue, will spin indefinitely until it's "next" pointer is set by its successor in the queue. Running stress-ng on a 16 core (16EC/16VP) shared LPAR, results in occasional lockups similar to the following: $ stress-ng --all 128 --vm-bytes 80% --aggressive \ --maximize --oomable --verify --syslog \ --metrics --times --timeout 5m watchdog: CPU 15 Hard LOCKUP ...... NIP [c0000000000b78f4] queued_spin_lock_slowpath+0x1184/0x1490 LR [c000000001037c5c] _raw_spin_lock+0x6c/0x90 Call Trace: 0xc000002cfffa3bf0 (unreliable) _raw_spin_lock+0x6c/0x90 raw_spin_rq_lock_nested.part.135+0x4c/0xd0 sched_ttwu_pending+0x60/0x1f0 __flush_smp_call_function_queue+0x1dc/0x670 smp_ipi_demux_relaxed+0xa4/0x100 xive_muxed_ipi_action+0x20/0x40 __handle_irq_event_percpu+0x80/0x240 handle_irq_event_percpu+0x2c/0x80 handle_percpu_irq+0x84/0xd0 generic_handle_irq+0x54/0x80 __do_irq+0xac/0x210 __do_IRQ+0x74/0xd0 0x0 do_IRQ+0x8c/0x170 hardware_interrupt_common_virt+0x29c/0x2a0 --- interrupt: 500 at queued_spin_lock_slowpath+0x4b8/0x1490 ...... NIP [c0000000000b6c28] queued_spin_lock_slowpath+0x4b8/0x1490 LR [c000000001037c5c] _raw_spin_lock+0x6c/0x90 --- interrupt: 500 0xc0000029c1a41d00 (unreliable) _raw_spin_lock+0x6c/0x90 futex_wake+0x100/0x260 do_futex+0x21c/0x2a0 sys_futex+0x98/0x270 system_call_exception+0x14c/0x2f0 system_call_vectored_common+0x15c/0x2ec The following code flow illustrates how the deadlock occurs. For the sake of brevity, assume that both locks (A and B) are contended and we call the queued_spin_lock_slowpath() function. 
CPU0 CPU1 ---- ---- spin_lock_irqsave(A) | spin_unlock_irqrestore(A) | spin_lock(B) | | | ▼ | id = qnodesp->count++; | (Note that nodes[0].lock == A) | | | ▼ | Interrupt | (happens before "nodes[0].lock = B") | | | ▼ | spin_lock_irqsave(A) | | | ▼ | id = qnodesp->count++ | nodes[1].lock = A | | | ▼ | Tail of MCS queue | | spin_lock_irqsave(A) ▼ | Head of MCS queue ▼ | CPU0 is previous tail ▼ | Spin indefinitely ▼ (until "nodes[1].next != NULL") prev = get_tail_qnode(A, CPU0) | ▼ prev == &qnodes[CPU0].nodes[0] (as qnodes[CPU0].nodes[0].lock == A) | ▼ WRITE_ONCE(prev->next, node) | ▼ Spin indefinitely (until nodes[0].locked == 1) Thanks to Saket Kumar Bhaskar for help with recreating the issue Fixes: 84990b169557 ("powerpc/qspinlock: add mcs queueing for contended waiters") Cc: stable@vger.kernel.org # v6.2+ Reported-by: Geetika Moolchandani Reported-by: Vaishnavi Bhat Reported-by: Jijo Varghese Signed-off-by: Nysal Jan K.A. Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://msgid.link/20240829022830.1164355-1-nysal@linux.ibm.com Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/lib/qspinlock.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c index 5de4dd549f6e..bcc7e4dff8c3 100644 --- a/arch/powerpc/lib/qspinlock.c +++ b/arch/powerpc/lib/qspinlock.c @@ -697,7 +697,15 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b } release: - qnodesp->count--; /* release the node */ + /* + * Clear the lock before releasing the node, as another CPU might see stale + * values if an interrupt occurs after we increment qnodesp->count + * but before node->lock is initialized. The barrier ensures that + * there are no further stores to the node after it has been released. + */ + node->lock = NULL; + barrier(); + qnodesp->count--; } void queued_spin_lock_slowpath(struct qspinlock *lock) From 762099898309218b4a7954f3d49e985dc4dfd638 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Tue, 3 Sep 2024 10:53:24 -0300 Subject: [PATCH 017/374] smb: client: fix double put of @cfile in smb2_set_path_size() commit f9c169b51b6ce20394594ef674d6b10efba31220 upstream. If smb2_compound_op() is called with a valid @cfile and returned -EINVAL, we need to call cifs_get_writable_path() before retrying it as the reference of @cfile was already dropped by previous call. This fixes the following KASAN splat when running fstests generic/013 against Windows Server 2022: CIFS: Attempting to mount //w22-fs0/scratch run fstests generic/013 at 2024-09-02 19:48:59 ================================================================== BUG: KASAN: slab-use-after-free in detach_if_pending+0xab/0x200 Write of size 8 at addr ffff88811f1a3730 by task kworker/3:2/176 CPU: 3 UID: 0 PID: 176 Comm: kworker/3:2 Not tainted 6.11.0-rc6 #2 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-2.fc40 04/01/2014 Workqueue: cifsoplockd cifs_oplock_break [cifs] Call Trace: dump_stack_lvl+0x5d/0x80 ? detach_if_pending+0xab/0x200 print_report+0x156/0x4d9 ? detach_if_pending+0xab/0x200 ? __virt_addr_valid+0x145/0x300 ? __phys_addr+0x46/0x90 ? detach_if_pending+0xab/0x200 kasan_report+0xda/0x110 ? detach_if_pending+0xab/0x200 detach_if_pending+0xab/0x200 timer_delete+0x96/0xe0 ? __pfx_timer_delete+0x10/0x10 ? rcu_is_watching+0x20/0x50 try_to_grab_pending+0x46/0x3b0 __cancel_work+0x89/0x1b0 ? __pfx___cancel_work+0x10/0x10 ? kasan_save_track+0x14/0x30 cifs_close_deferred_file+0x110/0x2c0 [cifs] ? 
__pfx_cifs_close_deferred_file+0x10/0x10 [cifs] ? __pfx_down_read+0x10/0x10 cifs_oplock_break+0x4c1/0xa50 [cifs] ? __pfx_cifs_oplock_break+0x10/0x10 [cifs] ? lock_is_held_type+0x85/0xf0 ? mark_held_locks+0x1a/0x90 process_one_work+0x4c6/0x9f0 ? find_held_lock+0x8a/0xa0 ? __pfx_process_one_work+0x10/0x10 ? lock_acquired+0x220/0x550 ? __list_add_valid_or_report+0x37/0x100 worker_thread+0x2e4/0x570 ? __kthread_parkme+0xd1/0xf0 ? __pfx_worker_thread+0x10/0x10 kthread+0x17f/0x1c0 ? kthread+0xda/0x1c0 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x31/0x60 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1a/0x30 Allocated by task 1118: kasan_save_stack+0x30/0x50 kasan_save_track+0x14/0x30 __kasan_kmalloc+0xaa/0xb0 cifs_new_fileinfo+0xc8/0x9d0 [cifs] cifs_atomic_open+0x467/0x770 [cifs] lookup_open.isra.0+0x665/0x8b0 path_openat+0x4c3/0x1380 do_filp_open+0x167/0x270 do_sys_openat2+0x129/0x160 __x64_sys_creat+0xad/0xe0 do_syscall_64+0xbb/0x1d0 entry_SYSCALL_64_after_hwframe+0x77/0x7f Freed by task 83: kasan_save_stack+0x30/0x50 kasan_save_track+0x14/0x30 kasan_save_free_info+0x3b/0x70 poison_slab_object+0xe9/0x160 __kasan_slab_free+0x32/0x50 kfree+0xf2/0x300 process_one_work+0x4c6/0x9f0 worker_thread+0x2e4/0x570 kthread+0x17f/0x1c0 ret_from_fork+0x31/0x60 ret_from_fork_asm+0x1a/0x30 Last potentially related work creation: kasan_save_stack+0x30/0x50 __kasan_record_aux_stack+0xad/0xc0 insert_work+0x29/0xe0 __queue_work+0x5ea/0x760 queue_work_on+0x6d/0x90 _cifsFileInfo_put+0x3f6/0x770 [cifs] smb2_compound_op+0x911/0x3940 [cifs] smb2_set_path_size+0x228/0x270 [cifs] cifs_set_file_size+0x197/0x460 [cifs] cifs_setattr+0xd9c/0x14b0 [cifs] notify_change+0x4e3/0x740 do_truncate+0xfa/0x180 vfs_truncate+0x195/0x200 __x64_sys_truncate+0x109/0x150 do_syscall_64+0xbb/0x1d0 entry_SYSCALL_64_after_hwframe+0x77/0x7f Fixes: 71f15c90e785 ("smb: client: retry compound request without reusing lease") Cc: stable@vger.kernel.org Signed-off-by: Paulo Alcantara (Red Hat) Cc: David Howells Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/smb/client/smb2inode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 9f5bc41433c1..2a2847601f26 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -1149,6 +1149,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon, cfile, NULL, NULL, dentry); if (rc == -EINVAL) { cifs_dbg(FYI, "invalid lease key, resending request without lease"); + cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, &in_iov, &(int){SMB2_OP_SET_EOF}, 1, From 4c8496f44f5bb5c06cdef5eb130ab259643392a1 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Tue, 27 Aug 2024 21:44:41 +0900 Subject: [PATCH 018/374] ksmbd: unset the binding mark of a reused connection commit 78c5a6f1f630172b19af4912e755e1da93ef0ab5 upstream. Steve French reported null pointer dereference error from sha256 lib. cifs.ko can send session setup requests on reused connection. If reused connection is used for binding session, conn->binding can still remain true and generate_preauth_hash() will not set sess->Preauth_HashValue and it will be NULL. It is used as a material to create an encryption key in ksmbd_gen_smb311_encryptionkey. ->Preauth_HashValue cause null pointer dereference error from crypto_shash_update(). 
BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 8 PID: 429254 Comm: kworker/8:39 Hardware name: LENOVO 20MAS08500/20MAS08500, BIOS N2CET69W (1.52 ) Workqueue: ksmbd-io handle_ksmbd_work [ksmbd] RIP: 0010:lib_sha256_base_do_update.isra.0+0x11e/0x1d0 [sha256_ssse3] ? show_regs+0x6d/0x80 ? __die+0x24/0x80 ? page_fault_oops+0x99/0x1b0 ? do_user_addr_fault+0x2ee/0x6b0 ? exc_page_fault+0x83/0x1b0 ? asm_exc_page_fault+0x27/0x30 ? __pfx_sha256_transform_rorx+0x10/0x10 [sha256_ssse3] ? lib_sha256_base_do_update.isra.0+0x11e/0x1d0 [sha256_ssse3] ? __pfx_sha256_transform_rorx+0x10/0x10 [sha256_ssse3] ? __pfx_sha256_transform_rorx+0x10/0x10 [sha256_ssse3] _sha256_update+0x77/0xa0 [sha256_ssse3] sha256_avx2_update+0x15/0x30 [sha256_ssse3] crypto_shash_update+0x1e/0x40 hmac_update+0x12/0x20 crypto_shash_update+0x1e/0x40 generate_key+0x234/0x380 [ksmbd] generate_smb3encryptionkey+0x40/0x1c0 [ksmbd] ksmbd_gen_smb311_encryptionkey+0x72/0xa0 [ksmbd] ntlm_authenticate.isra.0+0x423/0x5d0 [ksmbd] smb2_sess_setup+0x952/0xaa0 [ksmbd] __process_request+0xa3/0x1d0 [ksmbd] __handle_ksmbd_work+0x1c4/0x2f0 [ksmbd] handle_ksmbd_work+0x2d/0xa0 [ksmbd] process_one_work+0x16c/0x350 worker_thread+0x306/0x440 ? __pfx_worker_thread+0x10/0x10 kthread+0xef/0x120 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x44/0x70 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1b/0x30 Fixes: f5a544e3bab7 ("ksmbd: add support for SMB3 multichannel") Cc: stable@vger.kernel.org # v5.15+ Signed-off-by: Namjae Jeon Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/smb/server/smb2pdu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 8cfdf0d1a186..bc69b94df40f 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1687,6 +1687,8 @@ int smb2_sess_setup(struct ksmbd_work *work) rc = ksmbd_session_register(conn, sess); if (rc) goto out_err; + + conn->binding = false; } else if (conn->dialect >= SMB30_PROT_ID && (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) && req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) { @@ -1765,6 +1767,8 @@ int smb2_sess_setup(struct ksmbd_work *work) sess = NULL; goto out_err; } + + conn->binding = false; } work->sess = sess; From 28be8205346071731dc5108d72b3ff357fac966f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 29 Aug 2024 22:22:35 +0300 Subject: [PATCH 019/374] ksmbd: Unlock on in ksmbd_tcp_set_interfaces() commit 844436e045ac2ab7895d8b281cb784a24de1d14d upstream. Unlock before returning an error code if this allocation fails. 
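A minimal sketch of the corrected loop (condensed from the hunk below): every return taken while the RTNL lock is held has to drop it first.

        rtnl_lock();
        for_each_netdev(&init_net, netdev) {
                if (netif_is_bridge_port(netdev))
                        continue;
                if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
                        rtnl_unlock();          /* was missing on this error path */
                        return -ENOMEM;
                }
        }
        rtnl_unlock();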
Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers") Cc: stable@vger.kernel.org # v5.15+ Signed-off-by: Dan Carpenter Acked-by: Namjae Jeon Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/smb/server/transport_tcp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index 6633fa78e9b9..2ce7f75059cb 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -624,8 +624,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) for_each_netdev(&init_net, netdev) { if (netif_is_bridge_port(netdev)) continue; - if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) + if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) { + rtnl_unlock(); return -ENOMEM; + } } rtnl_unlock(); bind_additional_ifaces = 1; From 81790a4dce0688d0b4732fd3983d5506ec40de96 Mon Sep 17 00:00:00 2001 From: Zheng Qixing Date: Thu, 22 Aug 2024 11:30:50 +0800 Subject: [PATCH 020/374] ata: libata: Fix memory leak for error path in ata_host_alloc() commit 284b75a3d83c7631586d98f6dede1d90f128f0db upstream. In ata_host_alloc(), if devres_alloc() fails to allocate the device host resource data pointer, the already allocated ata_host structure is not freed before returning from the function. This results in a potential memory leak. Call kfree(host) before jumping to the error handling path to ensure that the ata_host structure is properly freed if devres_alloc() fails. Fixes: 2623c7a5f279 ("libata: add refcounting to ata_host") Cc: stable@vger.kernel.org Signed-off-by: Zheng Qixing Reviewed-by: Yu Kuai Signed-off-by: Damien Le Moal Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 74b59b78d278..71801001ee09 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5583,8 +5583,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) } dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL); - if (!dr) + if (!dr) { + kfree(host); goto err_out; + } devres_add(dev, dr); dev_set_drvdata(dev, host); From b55ce742afcb8e8189d82f2f1e635ba1b5a461fa Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Aug 2024 15:53:04 +0300 Subject: [PATCH 021/374] x86/tdx: Fix data leak in mmio_read() commit b6fb565a2d15277896583d471b21bc14a0c99661 upstream. The mmio_read() function makes a TDVMCALL to retrieve MMIO data for an address from the VMM. Sean noticed that mmio_read() unintentionally exposes the value of an initialized variable (val) on the stack to the VMM. This variable is only needed as an output value. It did not need to be passed to the VMM in the first place. Do not send the original value of *val to the VMM. [ dhansen: clarify what 'val' is used for. ] Fixes: 31d58c4e557d ("x86/tdx: Handle in-kernel MMIO") Reported-by: Sean Christopherson Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Dave Hansen Cc:stable@vger.kernel.org Link: https://lore.kernel.org/all/20240826125304.1566719-1-kirill.shutemov%40linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/coco/tdx/tdx.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index c1cb90369915..8fe4c2b07128 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -385,7 +385,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val) .r12 = size, .r13 = EPT_READ, .r14 = addr, - .r15 = *val, }; if (__tdx_hypercall(&args)) From 8717dc35c0e5896f4110f4b3882f7ff787a5f73d Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 19 Aug 2024 11:30:04 -0700 Subject: [PATCH 022/374] perf/x86/intel: Limit the period on Haswell commit 25dfc9e357af8aed1ca79b318a73f2c59c1f0b2b upstream. Running the ltp test cve-2015-3290 concurrently reports the following warnings. perfevents: irq loop stuck! WARNING: CPU: 31 PID: 32438 at arch/x86/events/intel/core.c:3174 intel_pmu_handle_irq+0x285/0x370 Call Trace: ? __warn+0xa4/0x220 ? intel_pmu_handle_irq+0x285/0x370 ? __report_bug+0x123/0x130 ? intel_pmu_handle_irq+0x285/0x370 ? __report_bug+0x123/0x130 ? intel_pmu_handle_irq+0x285/0x370 ? report_bug+0x3e/0xa0 ? handle_bug+0x3c/0x70 ? exc_invalid_op+0x18/0x50 ? asm_exc_invalid_op+0x1a/0x20 ? irq_work_claim+0x1e/0x40 ? intel_pmu_handle_irq+0x285/0x370 perf_event_nmi_handler+0x3d/0x60 nmi_handle+0x104/0x330 Thanks to Thomas Gleixner's analysis, the issue is caused by the low initial period (1) of the frequency estimation algorithm, which triggers the defects of the HW, specifically erratum HSW11 and HSW143. (For the details, please refer https://lore.kernel.org/lkml/87plq9l5d2.ffs@tglx/) The HSW11 requires a period larger than 100 for the INST_RETIRED.ALL event, but the initial period in the freq mode is 1. The erratum is the same as the BDM11, which has been supported in the kernel. A minimum period of 128 is enforced as well on HSW. HSW143 is regarding that the fixed counter 1 may overcount 32 with the Hyper-Threading is enabled. However, based on the test, the hardware has more issues than it tells. Besides the fixed counter 1, the message 'interrupt took too long' can be observed on any counter which was armed with a period < 32 and two events expired in the same NMI. A minimum period of 32 is enforced for the rest of the events. The recommended workaround code of the HSW143 is not implemented. Because it only addresses the issue for the fixed counter. It brings extra overhead through extra MSR writing. No related overcounting issue has been reported so far. 
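A simplified sketch of the resulting constraint (mirroring the hunk below): the new limit_period callback clamps the period to 128 for INST_RETIRED.ALL (HSW11) and to 32 for every other event.

        static void hsw_limit_period(struct perf_event *event, s64 *left)
        {
                *left = max(*left, erratum_hsw11(event) ? 128 : 32);
        }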
Fixes: 3a632cb229bf ("perf/x86/intel: Add simple Haswell PMU support") Reported-by: Li Huafei Suggested-by: Thomas Gleixner Signed-off-by: Kan Liang Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20240819183004.3132920-1-kan.liang@linux.intel.com Closes: https://lore.kernel.org/lkml/20240729223328.327835-1-lihuafei1@huawei.com/ Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f25205d047e8..05ec651663cb 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4529,6 +4529,25 @@ static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) return HYBRID_INTEL_CORE; } +static inline bool erratum_hsw11(struct perf_event *event) +{ + return (event->hw.config & INTEL_ARCH_EVENT_MASK) == + X86_CONFIG(.event=0xc0, .umask=0x01); +} + +/* + * The HSW11 requires a period larger than 100 which is the same as the BDM11. + * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL. + * + * The message 'interrupt took too long' can be observed on any counter which + * was armed with a period < 32 and two events expired in the same NMI. + * A minimum period of 32 is enforced for the rest of the events. + */ +static void hsw_limit_period(struct perf_event *event, s64 *left) +{ + *left = max(*left, erratum_hsw11(event) ? 128 : 32); +} + /* * Broadwell: * @@ -4546,8 +4565,7 @@ static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) */ static void bdw_limit_period(struct perf_event *event, s64 *left) { - if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == - X86_CONFIG(.event=0xc0, .umask=0x01)) { + if (erratum_hsw11(event)) { if (*left < 128) *left = 128; *left &= ~0x3fULL; @@ -6573,6 +6591,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; + x86_pmu.limit_period = hsw_limit_period; x86_pmu.lbr_double_abort = true; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; From 028bd7b41f56d80750c40f77d933eff65ed2af18 Mon Sep 17 00:00:00 2001 From: Ma Ke Date: Tue, 20 Aug 2024 17:28:43 +0800 Subject: [PATCH 023/374] irqchip/gic-v2m: Fix refcount leak in gicv2m_of_init() commit c5af2c90ba5629f0424a8d315f75fb8d91713c3c upstream. gicv2m_of_init() fails to perform an of_node_put() when of_address_to_resource() fails, leading to a refcount leak. Address this by moving the error handling path outside of the loop and making it common to all failure modes. 
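A condensed excerpt of the corrected flow (only lines from the hunk below, loop header elided): the failing iteration now just breaks out of the loop, keeping its node reference, and a single of_node_put() on the common error path releases it for every failure mode.

                ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res, 0);
                if (ret)
                        break;          /* leave the loop with the reference still held */
        }

        if (ret && child)
                of_node_put(child);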
Fixes: 4266ab1a8ff5 ("irqchip/gic-v2m: Refactor to prepare for ACPI support") Signed-off-by: Ma Ke Signed-off-by: Thomas Gleixner Reviewed-by: Marc Zyngier Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20240820092843.1219933-1-make24@iscas.ac.cn Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/irq-gic-v2m.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index f2ff4387870d..d83c2c85962c 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -438,12 +438,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res, 0); - if (ret) { - of_node_put(child); + if (ret) break; - } } + if (ret && child) + of_node_put(child); if (!ret) ret = gicv2m_allocate_domains(parent); if (ret) From 9319bfd74041e209966402d5ae32887f967f0feb Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 20 Aug 2024 09:18:50 +0530 Subject: [PATCH 024/374] irqchip/sifive-plic: Probe plic driver early for Allwinner D1 platform commit 4d936f10ff80274841537a26d1fbfe9984de0ef9 upstream. The latest Linux RISC-V no longer boots on the Allwinner D1 platform because the sun4i_timer driver fails to get an interrupt from PLIC due to the recent conversion of the PLIC to a platform driver. Converting the sun4i timer to a platform driver does not work either because the D1 does not have a SBI timer available so early boot hangs. See the 'Closes:' link for deeper analysis. The real fix requires enabling the SBI time extension in the platform firmware (OpenSBI) and convert sun4i_timer into platform driver. Unfortunately, the real fix involves changing multiple places and can't be achieved in a short duration and aside of that requires users to update firmware. As a work-around, retrofit PLIC probing such that the PLIC is probed early only for the Allwinner D1 platform and probed as a regular platform driver for rest of the RISC-V platforms. In the process, partially revert some of the previous changes because the PLIC device pointer is not available in all probing paths. 
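The hunks below rework the probe path to take a fwnode and use plain allocations so that it can run before the platform bus is available. As a loose sketch of the split itself (not the literal upstream code; the wrapper name here is made up), the early path for the D1 can be expressed as an IRQCHIP_DECLARE() entry keyed on the Allwinner compatible, while every other platform keeps the regular platform driver:

        /* Illustrative only: early probe for the one platform without an SBI timer. */
        static int __init plic_early_probe(struct device_node *node,
                                           struct device_node *parent)
        {
                return plic_probe(of_fwnode_handle(node));
        }
        IRQCHIP_DECLARE(sun20i_d1_plic, "allwinner,sun20i-d1-plic", plic_early_probe);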
Fixes: e306a894bd51 ("irqchip/sifive-plic: Chain to parent IRQ after handlers are ready") Fixes: 8ec99b033147 ("irqchip/sifive-plic: Convert PLIC driver into a platform driver") Suggested-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Samuel Holland Tested-by: Emil Renner Berthing Tested-by: Charlie Jenkins Reviewed-by: Samuel Holland Reviewed-by: Charlie Jenkins Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20240820034850.3189912-1-apatel@ventanamicro.com Closes: https://lore.kernel.org/lkml/20240814145642.344485-1-emil.renner.berthing@canonical.com/ Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/irq-sifive-plic.c | 115 ++++++++++++++++++------------ 1 file changed, 71 insertions(+), 44 deletions(-) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 9e22f7e378f5..4d9ea718086d 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -3,6 +3,7 @@ * Copyright (C) 2017 SiFive * Copyright (C) 2018 Christoph Hellwig */ +#define pr_fmt(fmt) "riscv-plic: " fmt #include #include #include @@ -63,7 +64,7 @@ #define PLIC_QUIRK_EDGE_INTERRUPT 0 struct plic_priv { - struct device *dev; + struct fwnode_handle *fwnode; struct cpumask lmask; struct irq_domain *irqdomain; void __iomem *regs; @@ -378,8 +379,8 @@ static void plic_handle_irq(struct irq_desc *desc) int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq); if (unlikely(err)) { - dev_warn_ratelimited(handler->priv->dev, - "can't find mapping for hwirq %lu\n", hwirq); + pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", + handler->priv->fwnode, hwirq); } } @@ -408,7 +409,8 @@ static int plic_starting_cpu(unsigned int cpu) enable_percpu_irq(plic_parent_irq, irq_get_trigger_type(plic_parent_irq)); else - dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu); + pr_warn("%pfwP: cpu%d: parent irq not available\n", + handler->priv->fwnode, cpu); plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); return 0; @@ -424,38 +426,36 @@ static const struct of_device_id plic_match[] = { {} }; -static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev, +static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, u32 *nr_irqs, u32 *nr_contexts) { - struct device *dev = &pdev->dev; int rc; /* * Currently, only OF fwnode is supported so extend this * function for ACPI support. 
*/ - if (!is_of_node(dev->fwnode)) + if (!is_of_node(fwnode)) return -EINVAL; - rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs); + rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); if (rc) { - dev_err(dev, "riscv,ndev property not available\n"); + pr_err("%pfwP: riscv,ndev property not available\n", fwnode); return rc; } - *nr_contexts = of_irq_count(to_of_node(dev->fwnode)); + *nr_contexts = of_irq_count(to_of_node(fwnode)); if (WARN_ON(!(*nr_contexts))) { - dev_err(dev, "no PLIC context available\n"); + pr_err("%pfwP: no PLIC context available\n", fwnode); return -EINVAL; } return 0; } -static int plic_parse_context_parent(struct platform_device *pdev, u32 context, +static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, u32 *parent_hwirq, int *parent_cpu) { - struct device *dev = &pdev->dev; struct of_phandle_args parent; unsigned long hartid; int rc; @@ -464,10 +464,10 @@ static int plic_parse_context_parent(struct platform_device *pdev, u32 context, * Currently, only OF fwnode is supported so extend this * function for ACPI support. */ - if (!is_of_node(dev->fwnode)) + if (!is_of_node(fwnode)) return -EINVAL; - rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent); + rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); if (rc) return rc; @@ -480,48 +480,55 @@ static int plic_parse_context_parent(struct platform_device *pdev, u32 context, return 0; } -static int plic_probe(struct platform_device *pdev) +static int plic_probe(struct fwnode_handle *fwnode) { int error = 0, nr_contexts, nr_handlers = 0, cpu, i; - struct device *dev = &pdev->dev; unsigned long plic_quirks = 0; struct plic_handler *handler; u32 nr_irqs, parent_hwirq; struct plic_priv *priv; irq_hw_number_t hwirq; + void __iomem *regs; - if (is_of_node(dev->fwnode)) { + if (is_of_node(fwnode)) { const struct of_device_id *id; - id = of_match_node(plic_match, to_of_node(dev->fwnode)); + id = of_match_node(plic_match, to_of_node(fwnode)); if (id) plic_quirks = (unsigned long)id->data; + + regs = of_iomap(to_of_node(fwnode), 0); + if (!regs) + return -ENOMEM; + } else { + return -ENODEV; } - error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts); + error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts); if (error) - return error; + goto fail_free_regs; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + error = -ENOMEM; + goto fail_free_regs; + } - priv->dev = dev; + priv->fwnode = fwnode; priv->plic_quirks = plic_quirks; priv->nr_irqs = nr_irqs; + priv->regs = regs; - priv->regs = devm_platform_ioremap_resource(pdev, 0); - if (WARN_ON(!priv->regs)) - return -EIO; - - priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL); - if (!priv->prio_save) - return -ENOMEM; + priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); + if (!priv->prio_save) { + error = -ENOMEM; + goto fail_free_priv; + } for (i = 0; i < nr_contexts; i++) { - error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu); + error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu); if (error) { - dev_warn(dev, "hwirq for context%d not found\n", i); + pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); continue; } @@ -543,7 +550,7 @@ static int plic_probe(struct platform_device *pdev) } if (cpu < 0) { - dev_warn(dev, "Invalid cpuid for context %d\n", i); + pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i); 
continue; } @@ -554,7 +561,7 @@ static int plic_probe(struct platform_device *pdev) */ handler = per_cpu_ptr(&plic_handlers, cpu); if (handler->present) { - dev_warn(dev, "handler already present for context %d.\n", i); + pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i); plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); goto done; } @@ -568,8 +575,8 @@ static int plic_probe(struct platform_device *pdev) i * CONTEXT_ENABLE_SIZE; handler->priv = priv; - handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32), - sizeof(*handler->enable_save), GFP_KERNEL); + handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), + sizeof(*handler->enable_save), GFP_KERNEL); if (!handler->enable_save) goto fail_cleanup_contexts; done: @@ -581,7 +588,7 @@ static int plic_probe(struct platform_device *pdev) nr_handlers++; } - priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1, + priv->irqdomain = irq_domain_add_linear(to_of_node(fwnode), nr_irqs + 1, &plic_irqdomain_ops, priv); if (WARN_ON(!priv->irqdomain)) goto fail_cleanup_contexts; @@ -619,13 +626,13 @@ static int plic_probe(struct platform_device *pdev) } } - dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n", - nr_irqs, nr_handlers, nr_contexts); + pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", + fwnode, nr_irqs, nr_handlers, nr_contexts); return 0; fail_cleanup_contexts: for (i = 0; i < nr_contexts; i++) { - if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu)) + if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu)) continue; if (parent_hwirq != RV_IRQ_EXT || cpu < 0) continue; @@ -634,17 +641,37 @@ static int plic_probe(struct platform_device *pdev) handler->present = false; handler->hart_base = NULL; handler->enable_base = NULL; + kfree(handler->enable_save); handler->enable_save = NULL; handler->priv = NULL; } - return -ENOMEM; + bitmap_free(priv->prio_save); +fail_free_priv: + kfree(priv); +fail_free_regs: + iounmap(regs); + return error; +} + +static int plic_platform_probe(struct platform_device *pdev) +{ + return plic_probe(pdev->dev.fwnode); } static struct platform_driver plic_driver = { .driver = { .name = "riscv-plic", .of_match_table = plic_match, + .suppress_bind_attrs = true, }, - .probe = plic_probe, + .probe = plic_platform_probe, }; builtin_platform_driver(plic_driver); + +static int __init plic_early_probe(struct device_node *node, + struct device_node *parent) +{ + return plic_probe(&node->fwnode); +} + +IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); From cfcd89ed9c57d29197ecc9cee030e6488551c6e6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 14 Aug 2024 00:29:36 +0200 Subject: [PATCH 025/374] x86/kaslr: Expose and use the end of the physical memory address space commit ea72ce5da22806d5713f3ffb39a6d5ae73841f93 upstream. iounmap() on x86 occasionally fails to unmap because the provided valid ioremap address is not below high_memory. It turned out that this happens due to KASLR. KASLR uses the full address space between PAGE_OFFSET and vaddr_end to randomize the starting points of the direct map, vmalloc and vmemmap regions. It thereby limits the size of the direct map by using the installed memory size plus an extra configurable margin for hot-plug memory. This limitation is done to gain more randomization space because otherwise only the holes between the direct map, vmalloc, vmemmap and vaddr_end would be usable for randomizing. 
The limited direct map size is not exposed to the rest of the kernel, so the memory hot-plug and resource management related code paths still operate under the assumption that the available address space can be determined with MAX_PHYSMEM_BITS. request_free_mem_region() allocates from (1 << MAX_PHYSMEM_BITS) - 1 downwards. That means the first allocation happens past the end of the direct map and if unlucky this address is in the vmalloc space, which causes high_memory to become greater than VMALLOC_START and consequently causes iounmap() to fail for valid ioremap addresses. MAX_PHYSMEM_BITS cannot be changed for that because the randomization does not align with address bit boundaries and there are other places which actually require to know the maximum number of address bits. All remaining usage sites of MAX_PHYSMEM_BITS have been analyzed and found to be correct. Cure this by exposing the end of the direct map via PHYSMEM_END and use that for the memory hot-plug and resource management related places instead of relying on MAX_PHYSMEM_BITS. In the KASLR case PHYSMEM_END maps to a variable which is initialized by the KASLR initialization and otherwise it is based on MAX_PHYSMEM_BITS as before. To prevent future hickups add a check into add_pages() to catch callers trying to add memory above PHYSMEM_END. Fixes: 0483e1fa6e09 ("x86/mm: Implement ASLR for kernel memory regions") Reported-by: Max Ramanouski Reported-by: Alistair Popple Signed-off-by: Thomas Gleixner Tested-By: Max Ramanouski Tested-by: Alistair Popple Reviewed-by: Dan Williams Reviewed-by: Alistair Popple Reviewed-by: Kees Cook Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/87ed6soy3z.ffs@tglx Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/page_64.h | 1 + arch/x86/include/asm/pgtable_64_types.h | 4 ++++ arch/x86/mm/init_64.c | 4 ++++ arch/x86/mm/kaslr.c | 32 ++++++++++++++++++++----- include/linux/mm.h | 4 ++++ kernel/resource.c | 6 ++--- mm/memory_hotplug.c | 2 +- mm/sparse.c | 2 +- 8 files changed, 43 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index cc6b8e087192..9dab85aba7af 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -17,6 +17,7 @@ extern unsigned long phys_base; extern unsigned long page_offset_base; extern unsigned long vmalloc_base; extern unsigned long vmemmap_base; +extern unsigned long physmem_end; static __always_inline unsigned long __phys_addr_nodebug(unsigned long x) { diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 9053dfe9fa03..a98e53491a4e 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d; # define VMEMMAP_START __VMEMMAP_BASE_L4 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ +#ifdef CONFIG_RANDOMIZE_MEMORY +# define PHYSMEM_END physmem_end +#endif + /* * End of the region for which vmalloc page tables are pre-allocated. * For non-KMSAN builds, this is the same as VMALLOC_END. 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7e177856ee4f..59087c871246 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size) int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params) { + unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1; int ret; + if (WARN_ON_ONCE(end > PHYSMEM_END)) + return -ERANGE; + ret = __add_pages(nid, start_pfn, nr_pages, params); WARN_ON_ONCE(ret); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 37db264866b6..230f1dee4f09 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; */ static __initdata struct kaslr_memory_region { unsigned long *base; + unsigned long *end; unsigned long size_tb; } kaslr_regions[] = { - { &page_offset_base, 0 }, - { &vmalloc_base, 0 }, - { &vmemmap_base, 0 }, + { + .base = &page_offset_base, + .end = &physmem_end, + }, + { + .base = &vmalloc_base, + }, + { + .base = &vmemmap_base, + }, }; +/* The end of the possible address space for physical memory */ +unsigned long physmem_end __ro_after_init; + /* Get size in bytes used by the memory region */ static inline unsigned long get_padding(struct kaslr_memory_region *region) { @@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void) BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); + /* Preset the end of the possible address space for physical memory */ + physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1); if (!kaslr_memory_enabled()) return; @@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void) vaddr += entropy; *kaslr_regions[i].base = vaddr; + /* Calculate the end of the region */ + vaddr += get_padding(&kaslr_regions[i]); /* - * Jump the region and add a minimum padding based on - * randomization alignment. + * KASLR trims the maximum possible size of the + * direct-map. Update the physmem_end boundary. + * No rounding required as the region starts + * PUD aligned and size is in units of TB. */ - vaddr += get_padding(&kaslr_regions[i]); + if (kaslr_regions[i].end) + *kaslr_regions[i].end = __pa_nodebug(vaddr - 1); + + /* Add a minimum padding based on randomization alignment. */ vaddr = round_up(vaddr + 1, PUD_SIZE); remain_entropy -= entropy; } diff --git a/include/linux/mm.h b/include/linux/mm.h index f32177152921..81562397e834 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -97,6 +97,10 @@ extern const int mmap_rnd_compat_bits_max; extern int mmap_rnd_compat_bits __read_mostly; #endif +#ifndef PHYSMEM_END +# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) +#endif + #include #include diff --git a/kernel/resource.c b/kernel/resource.c index fcbca39dbc45..b0e2b15ecb40 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1832,8 +1832,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size, if (flags & GFR_DESCENDING) { resource_size_t end; - end = min_t(resource_size_t, base->end, - (1ULL << MAX_PHYSMEM_BITS) - 1); + end = min_t(resource_size_t, base->end, PHYSMEM_END); return end - size + 1; } @@ -1850,8 +1849,7 @@ static bool gfr_continue(struct resource *base, resource_size_t addr, * @size did not wrap 0. 
*/ return addr > addr - size && - addr <= min_t(resource_size_t, base->end, - (1ULL << MAX_PHYSMEM_BITS) - 1); + addr <= min_t(resource_size_t, base->end, PHYSMEM_END); } static resource_size_t gfr_next(resource_size_t addr, resource_size_t size, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 431b1f6753c0..52553b13e277 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1682,7 +1682,7 @@ struct range __weak arch_get_mappable_range(void) struct range mhp_get_pluggable_range(bool need_mapping) { - const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; + const u64 max_phys = PHYSMEM_END; struct range mhp_range; if (need_mapping) { diff --git a/mm/sparse.c b/mm/sparse.c index de40b2c73406..168278d61bde 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section) static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, unsigned long *end_pfn) { - unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); + unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT; /* * Sanity checks - do not allow an architecture to pass From f13b5afc5c4889569d84c3011ce449f61fccfb28 Mon Sep 17 00:00:00 2001 From: Roland Xu Date: Thu, 15 Aug 2024 10:58:13 +0800 Subject: [PATCH 026/374] rtmutex: Drop rt_mutex::wait_lock before scheduling commit d33d26036a0274b472299d7dcdaa5fb34329f91b upstream. rt_mutex_handle_deadlock() is called with rt_mutex::wait_lock held. In the good case it returns with the lock held and in the deadlock case it emits a warning and goes into an endless scheduling loop with the lock held, which triggers the 'scheduling in atomic' warning. Unlock rt_mutex::wait_lock in the dead lock case before issuing the warning and dropping into the schedule for ever loop. [ tglx: Moved unlock before the WARN(), removed the pointless comment, massaged changelog, added Fixes tag ] Fixes: 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter") Signed-off-by: Roland Xu Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/ME0P300MB063599BEF0743B8FA339C2CECC802@ME0P300MB0635.AUSP300.PROD.OUTLOOK.COM Signed-off-by: Greg Kroah-Hartman --- kernel/locking/rtmutex.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 88d08eeb8bc0..fba1229f1de6 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1644,6 +1644,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, } static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, + struct rt_mutex_base *lock, struct rt_mutex_waiter *w) { /* @@ -1656,10 +1657,10 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, if (build_ww_mutex() && w->ww_ctx) return; - /* - * Yell loudly and stop the task right here. 
- */ + raw_spin_unlock_irq(&lock->wait_lock); + WARN(1, "rtmutex deadlock detected\n"); + while (1) { set_current_state(TASK_INTERRUPTIBLE); rt_mutex_schedule(); @@ -1713,7 +1714,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, } else { __set_current_state(TASK_RUNNING); remove_waiter(lock, waiter); - rt_mutex_handle_deadlock(ret, chwalk, waiter); + rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); } /* From 1863e1f098c5c18d39476d08f53aa5d1640335c5 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 20 Aug 2024 11:42:40 +0300 Subject: [PATCH 027/374] irqchip/riscv-aplic: Fix an IS_ERR() vs NULL bug in probe() commit efe81b7bdf7d882d0ce3d183f1571321046da8f1 upstream. The devm_platform_ioremap_resource() function doesn't return NULL, it returns error pointers. Fix the error handling to match. Fixes: 2333df5ae51e ("irqchip: Add RISC-V advanced PLIC driver for direct-mode") Signed-off-by: Dan Carpenter Signed-off-by: Thomas Gleixner Reviewed-by: Jinjie Ruan Reviewed-by: Anup Patel Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/a5a628d6-81d8-4933-81a8-64aad4743ec4@stanley.mountain Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/irq-riscv-aplic-main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c index 774a0c97fdab..362db30ed775 100644 --- a/drivers/irqchip/irq-riscv-aplic-main.c +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -175,9 +175,9 @@ static int aplic_probe(struct platform_device *pdev) /* Map the MMIO registers */ regs = devm_platform_ioremap_resource(pdev, 0); - if (!regs) { + if (IS_ERR(regs)) { dev_err(dev, "failed map MMIO registers\n"); - return -ENOMEM; + return PTR_ERR(regs); } /* From bf958898d5f51c3a0eb16bfd8ac6b300fbc79aed Mon Sep 17 00:00:00 2001 From: Georg Gottleuber Date: Tue, 27 Aug 2024 12:41:33 +0200 Subject: [PATCH 028/374] nvme-pci: Add sleep quirk for Samsung 990 Evo commit 61aa894e7a2fda4ee026523b01d07e83ce2abb72 upstream. On some TUXEDO platforms, a Samsung 990 Evo NVMe leads to a high power consumption in s2idle sleep (2-3 watts). This patch applies 'Force No Simple Suspend' quirk to achieve a sleep with a lower power consumption, typically around 0.5 watts. Signed-off-by: Georg Gottleuber Signed-off-by: Werner Sembach Cc: Signed-off-by: Keith Busch Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/pci.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a823330567ff..146d33c4839f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2931,6 +2931,17 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1")) return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) { + /* + * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND + * because of high power consumption (> 2 Watt) in s2idle + * sleep. Only some boards with Intel CPU are affected. 
+ */ + if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") || + dmi_match(DMI_BOARD_NAME, "PH4PG31") || + dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || + dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) + return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; } /* From 70c76a9c55ed4db898e784134e251fd791e18e7d Mon Sep 17 00:00:00 2001 From: Boqun Feng Date: Wed, 28 Aug 2024 11:01:29 -0700 Subject: [PATCH 029/374] rust: macros: provide correct provenance when constructing THIS_MODULE commit a5a3c952e82c1ada12bf8c55b73af26f1a454bd2 upstream. Currently while defining `THIS_MODULE` symbol in `module!()`, the pointer used to construct `ThisModule` is derived from an immutable reference of `__this_module`, which means the pointer doesn't have the provenance for writing, and that means any write to that pointer is UB regardless of data races or not. However, the usage of `THIS_MODULE` includes passing this pointer to functions that may write to it (probably in unsafe code), and this will create soundness issues. One way to fix this is using `addr_of_mut!()` but that requires the unstable feature "const_mut_refs". So instead of `addr_of_mut()!`, an extern static `Opaque` is used here: since `Opaque` is transparent to `T`, an extern static `Opaque` will just wrap the C symbol (defined in a C compile unit) in an `Opaque`, which provides a pointer with writable provenance via `Opaque::get()`. This fix the potential UBs because of pointer provenance unmatched. Reported-by: Alice Ryhl Signed-off-by: Boqun Feng Reviewed-by: Alice Ryhl Reviewed-by: Trevor Gross Reviewed-by: Benno Lossin Reviewed-by: Gary Guo Closes: https://rust-for-linux.zulipchat.com/#narrow/stream/x/topic/x/near/465412664 Fixes: 1fbde52bde73 ("rust: add `macros` crate") Cc: stable@vger.kernel.org # 6.6.x: be2ca1e03965: ("rust: types: Make Opaque::get const") Link: https://lore.kernel.org/r/20240828180129.4046355-1-boqun.feng@gmail.com [ Fixed two typos, reworded title. - Miguel ] Signed-off-by: Miguel Ojeda Signed-off-by: Greg Kroah-Hartman --- rust/macros/module.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rust/macros/module.rs b/rust/macros/module.rs index acd0393b5095..7dee348ef0cc 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -203,7 +203,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { // freed until the module is unloaded. #[cfg(MODULE)] static THIS_MODULE: kernel::ThisModule = unsafe {{ - kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _) + extern \"C\" {{ + static __this_module: kernel::types::Opaque; + }} + + kernel::ThisModule::from_ptr(__this_module.get()) }}; #[cfg(not(MODULE))] static THIS_MODULE: kernel::ThisModule = unsafe {{ From 6f774a3a00ec676aa20f0b984c8181b74bc19d85 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Tue, 27 Aug 2024 14:37:22 -0400 Subject: [PATCH 030/374] Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE" commit 532f8bcd1c2c4e8112f62e1922fd1703bc0ffce0 upstream. This reverts commit 59b047bc98084f8af2c41483e4d68a5adf2fa7f7 which breaks compatibility with commands like: bluetoothd[46328]: @ MGMT Command: Load.. 
(0x0013) plen 74 {0x0001} [hci0] Keys: 2 BR/EDR Address: C0:DC:DA:A5:E5:47 (Samsung Electronics Co.,Ltd) Key type: Authenticated key from P-256 (0x03) Central: 0x00 Encryption size: 16 Diversifier[2]: 0000 Randomizer[8]: 0000000000000000 Key[16]: 6ed96089bd9765be2f2c971b0b95f624 LE Address: D7:2A:DE:1E:73:A2 (Static) Key type: Unauthenticated key from P-256 (0x02) Central: 0x00 Encryption size: 16 Diversifier[2]: 0000 Randomizer[8]: 0000000000000000 Key[16]: 87dd2546ededda380ffcdc0a8faa4597 @ MGMT Event: Command Status (0x0002) plen 3 {0x0001} [hci0] Load Long Term Keys (0x0013) Status: Invalid Parameters (0x0d) Cc: stable@vger.kernel.org Link: https://github.com/bluez/bluez/issues/875 Fixes: 59b047bc9808 ("Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE") Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Greg Kroah-Hartman --- include/net/bluetooth/hci_core.h | 5 ----- net/bluetooth/mgmt.c | 25 +++++++------------------ net/bluetooth/smp.c | 7 ------- 3 files changed, 7 insertions(+), 30 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c97ff64c9189..ecb6824e9add 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -186,7 +186,6 @@ struct blocked_key { struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; - u8 link_type; u8 type; u8 val[16]; }; @@ -196,7 +195,6 @@ struct smp_ltk { struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; - u8 link_type; u8 authenticated; u8 type; u8 enc_size; @@ -211,7 +209,6 @@ struct smp_irk { bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; - u8 link_type; u8 val[16]; }; @@ -219,8 +216,6 @@ struct link_key { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; - u8 bdaddr_type; - u8 link_type; u8 type; u8 val[HCI_LINK_KEY_SIZE]; u8 pin_len; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ad4793ea052d..c11cad01af54 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2834,8 +2834,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, for (i = 0; i < key_count; i++) { struct mgmt_link_key_info *key = &cp->keys[i]; - /* Considering SMP over BREDR/LE, there is no need to check addr_type */ - if (key->type > 0x08) + if (key->addr.type != BDADDR_BREDR || key->type > 0x08) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); @@ -7073,7 +7072,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, for (i = 0; i < irk_count; i++) { struct mgmt_irk_info *irk = &cp->irks[i]; - u8 addr_type = le_addr_type(irk->addr.type); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, @@ -7083,12 +7081,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, continue; } - /* When using SMP over BR/EDR, the addr type should be set to BREDR */ - if (irk->addr.type == BDADDR_BREDR) - addr_type = BDADDR_BREDR; - hci_add_irk(hdev, &irk->addr.bdaddr, - addr_type, irk->val, + le_addr_type(irk->addr.type), irk->val, BDADDR_ANY); } @@ -7169,7 +7163,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, for (i = 0; i < key_count; i++) { struct mgmt_ltk_info *key = &cp->keys[i]; u8 type, authenticated; - u8 addr_type = le_addr_type(key->addr.type); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, @@ -7204,12 +7197,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, continue; } - /* When using SMP over BR/EDR, the addr type should be set to BREDR */ - if (key->addr.type == BDADDR_BREDR) - 
addr_type = BDADDR_BREDR; - hci_add_ltk(hdev, &key->addr.bdaddr, - addr_type, type, authenticated, + le_addr_type(key->addr.type), type, authenticated, key->val, key->enc_size, key->ediv, key->rand); } @@ -9457,7 +9446,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); - ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); + ev.key.addr.type = BDADDR_BREDR; ev.key.type = key->type; memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); ev.key.pin_len = key->pin_len; @@ -9508,7 +9497,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); - ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); + ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); ev.key.type = mgmt_ltk_type(key); ev.key.enc_size = key->enc_size; ev.key.ediv = key->ediv; @@ -9537,7 +9526,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent) bacpy(&ev.rpa, &irk->rpa); bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); - ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type); + ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); memcpy(ev.irk.val, irk->val, sizeof(irk->val)); mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); @@ -9566,7 +9555,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); - ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type); + ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); ev.key.type = csrk->type; memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 4f9fdf400584..8b9724fd752a 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1060,7 +1060,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) } if (smp->remote_irk) { - smp->remote_irk->link_type = hcon->type; mgmt_new_irk(hdev, smp->remote_irk, persistent); /* Now that user space can be considered to know the @@ -1080,28 +1079,24 @@ static void smp_notify_keys(struct l2cap_conn *conn) } if (smp->csrk) { - smp->csrk->link_type = hcon->type; smp->csrk->bdaddr_type = hcon->dst_type; bacpy(&smp->csrk->bdaddr, &hcon->dst); mgmt_new_csrk(hdev, smp->csrk, persistent); } if (smp->responder_csrk) { - smp->responder_csrk->link_type = hcon->type; smp->responder_csrk->bdaddr_type = hcon->dst_type; bacpy(&smp->responder_csrk->bdaddr, &hcon->dst); mgmt_new_csrk(hdev, smp->responder_csrk, persistent); } if (smp->ltk) { - smp->ltk->link_type = hcon->type; smp->ltk->bdaddr_type = hcon->dst_type; bacpy(&smp->ltk->bdaddr, &hcon->dst); mgmt_new_ltk(hdev, smp->ltk, persistent); } if (smp->responder_ltk) { - smp->responder_ltk->link_type = hcon->type; smp->responder_ltk->bdaddr_type = hcon->dst_type; bacpy(&smp->responder_ltk->bdaddr, &hcon->dst); mgmt_new_ltk(hdev, smp->responder_ltk, persistent); @@ -1121,8 +1116,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, smp->link_key, type, 0, &persistent); if (key) { - key->link_type = hcon->type; - key->bdaddr_type = hcon->dst_type; mgmt_new_link_key(hdev, key, persistent); /* Don't keep debug keys around if the relevant From 25256e790f9f24f042251fecad25a371cbaaf80e Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Tue, 27 Aug 2024 15:01:34 -0400 Subject: [PATCH 031/374] Bluetooth: MGMT: Ignore keys 
being loaded with invalid type commit 1e9683c9b6ca88cc9340cdca85edd6134c8cffe3 upstream. Due to 59b047bc98084f8af2c41483e4d68a5adf2fa7f7 there could be keys stored with the wrong address type so this attempt to detect it and ignore them instead of just failing to load all keys. Cc: stable@vger.kernel.org Link: https://github.com/bluez/bluez/issues/875 Fixes: 59b047bc9808 ("Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE") Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/mgmt.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index c11cad01af54..fa3fa1fde5df 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2831,15 +2831,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, key_count); - for (i = 0; i < key_count; i++) { - struct mgmt_link_key_info *key = &cp->keys[i]; - - if (key->addr.type != BDADDR_BREDR || key->type > 0x08) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); - } - hci_dev_lock(hdev); hci_link_keys_clear(hdev); @@ -2864,6 +2855,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, continue; } + if (key->addr.type != BDADDR_BREDR) { + bt_dev_warn(hdev, + "Invalid link address type %u for %pMR", + key->addr.type, &key->addr.bdaddr); + continue; + } + + if (key->type > 0x08) { + bt_dev_warn(hdev, "Invalid link key type %u for %pMR", + key->type, &key->addr.bdaddr); + continue; + } + /* Always ignore debug keys and require a new pairing if * the user wants to use them. */ @@ -7147,15 +7151,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, bt_dev_dbg(hdev, "key_count %u", key_count); - for (i = 0; i < key_count; i++) { - struct mgmt_ltk_info *key = &cp->keys[i]; - - if (!ltk_is_valid(key)) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_INVALID_PARAMS); - } - hci_dev_lock(hdev); hci_smp_ltks_clear(hdev); @@ -7172,6 +7167,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, continue; } + if (!ltk_is_valid(key)) { + bt_dev_warn(hdev, "Invalid LTK for %pMR", + &key->addr.bdaddr); + continue; + } + switch (key->type) { case MGMT_LTK_UNAUTHENTICATED: authenticated = 0x00; From 129ba129f20bd034ae3fe53f8e9bca11f2b2cbd1 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Fri, 9 Aug 2024 13:25:11 +0500 Subject: [PATCH 032/374] selftests: mm: fix build errors on armhf commit b808f629215685c1941b1cd567c7b7ccb3c90278 upstream. The __NR_mmap isn't found on armhf. The mmap() is commonly available system call and its wrapper is present on all architectures. So it should be used directly. It solves problem for armhf and doesn't create problem for other architectures. Remove sys_mmap() functions as they aren't doing anything else other than calling mmap(). There is no need to set errno = 0 manually as glibc always resets it. 
For reference errors are as following: CC seal_elf seal_elf.c: In function 'sys_mmap': seal_elf.c:39:33: error: '__NR_mmap' undeclared (first use in this function) 39 | sret = (void *) syscall(__NR_mmap, addr, len, prot, | ^~~~~~~~~ mseal_test.c: In function 'sys_mmap': mseal_test.c:90:33: error: '__NR_mmap' undeclared (first use in this function) 90 | sret = (void *) syscall(__NR_mmap, addr, len, prot, | ^~~~~~~~~ Link: https://lkml.kernel.org/r/20240809082511.497266-1-usama.anjum@collabora.com Fixes: 4926c7a52de7 ("selftest mm/mseal memory sealing") Signed-off-by: Muhammad Usama Anjum Cc: Jeff Xu Cc: Kees Cook Cc: Liam R. Howlett Cc: Shuah Khan Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/mm/mseal_test.c | 37 +++++++++---------------- tools/testing/selftests/mm/seal_elf.c | 13 +-------- 2 files changed, 14 insertions(+), 36 deletions(-) diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c index 41998cf1dcf5..09faffbc3d87 100644 --- a/tools/testing/selftests/mm/mseal_test.c +++ b/tools/testing/selftests/mm/mseal_test.c @@ -128,17 +128,6 @@ static int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, return sret; } -static void *sys_mmap(void *addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long offset) -{ - void *sret; - - errno = 0; - sret = (void *) syscall(__NR_mmap, addr, len, prot, - flags, fd, offset); - return sret; -} - static int sys_munmap(void *ptr, size_t size) { int sret; @@ -219,7 +208,7 @@ static void setup_single_address(int size, void **ptrOut) { void *ptr; - ptr = sys_mmap(NULL, size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ptr = mmap(NULL, size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); *ptrOut = ptr; } @@ -228,7 +217,7 @@ static void setup_single_address_rw(int size, void **ptrOut) void *ptr; unsigned long mapflags = MAP_ANONYMOUS | MAP_PRIVATE; - ptr = sys_mmap(NULL, size, PROT_READ | PROT_WRITE, mapflags, -1, 0); + ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, mapflags, -1, 0); *ptrOut = ptr; } @@ -252,7 +241,7 @@ bool seal_support(void) void *ptr; unsigned long page_size = getpagesize(); - ptr = sys_mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ptr = mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (ptr == (void *) -1) return false; @@ -528,8 +517,8 @@ static void test_seal_zero_address(void) int prot; /* use mmap to change protection. */ - ptr = sys_mmap(0, size, PROT_NONE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + ptr = mmap(0, size, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); FAIL_TEST_IF_FALSE(ptr == 0); size = get_vma_size(ptr, &prot); @@ -1256,8 +1245,8 @@ static void test_seal_mmap_overwrite_prot(bool seal) } /* use mmap to change protection. */ - ret2 = sys_mmap(ptr, size, PROT_NONE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + ret2 = mmap(ptr, size, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); if (seal) { FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED); FAIL_TEST_IF_FALSE(errno == EPERM); @@ -1287,8 +1276,8 @@ static void test_seal_mmap_expand(bool seal) } /* use mmap to expand. 
*/ - ret2 = sys_mmap(ptr, size, PROT_READ, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + ret2 = mmap(ptr, size, PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); if (seal) { FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED); FAIL_TEST_IF_FALSE(errno == EPERM); @@ -1315,8 +1304,8 @@ static void test_seal_mmap_shrink(bool seal) } /* use mmap to shrink. */ - ret2 = sys_mmap(ptr, 8 * page_size, PROT_READ, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + ret2 = mmap(ptr, 8 * page_size, PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); if (seal) { FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED); FAIL_TEST_IF_FALSE(errno == EPERM); @@ -1697,7 +1686,7 @@ static void test_seal_discard_ro_anon_on_filebacked(bool seal) ret = fallocate(fd, 0, 0, size); FAIL_TEST_IF_FALSE(!ret); - ptr = sys_mmap(NULL, size, PROT_READ, mapflags, fd, 0); + ptr = mmap(NULL, size, PROT_READ, mapflags, fd, 0); FAIL_TEST_IF_FALSE(ptr != MAP_FAILED); if (seal) { @@ -1727,7 +1716,7 @@ static void test_seal_discard_ro_anon_on_shared(bool seal) int ret; unsigned long mapflags = MAP_ANONYMOUS | MAP_SHARED; - ptr = sys_mmap(NULL, size, PROT_READ, mapflags, -1, 0); + ptr = mmap(NULL, size, PROT_READ, mapflags, -1, 0); FAIL_TEST_IF_FALSE(ptr != (void *)-1); if (seal) { diff --git a/tools/testing/selftests/mm/seal_elf.c b/tools/testing/selftests/mm/seal_elf.c index f2babec79bb6..07727d4892ac 100644 --- a/tools/testing/selftests/mm/seal_elf.c +++ b/tools/testing/selftests/mm/seal_elf.c @@ -61,17 +61,6 @@ static int sys_mseal(void *start, size_t len) return sret; } -static void *sys_mmap(void *addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long offset) -{ - void *sret; - - errno = 0; - sret = (void *) syscall(__NR_mmap, addr, len, prot, - flags, fd, offset); - return sret; -} - static inline int sys_mprotect(void *ptr, size_t size, unsigned long prot) { int sret; @@ -87,7 +76,7 @@ static bool seal_support(void) void *ptr; unsigned long page_size = getpagesize(); - ptr = sys_mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ptr = mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (ptr == (void *) -1) return false; From 202c58db7a9c68a9a38ee55b900f643259af9af4 Mon Sep 17 00:00:00 2001 From: Jonathan Bell Date: Wed, 21 Aug 2024 08:06:31 +0900 Subject: [PATCH 033/374] mmc: core: apply SD quirks earlier during probe commit 469e5e4713989fdd5e3e502b922e7be0da2464b9 upstream. Applying MMC_QUIRK_BROKEN_SD_CACHE is broken, as the card's SD quirks are referenced in sd_parse_ext_reg_perf() prior to the quirks being initialized in mmc_blk_probe(). To fix this problem, let's split out an SD-specific list of quirks and apply in mmc_sd_init_card() instead. In this way, sd_read_ext_regs() to has the available information for not assigning the SD_EXT_PERF_CACHE as one of the (un)supported features, which in turn allows mmc_sd_init_card() to properly skip execution of sd_enable_cache(). 
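In outline, the corrected ordering can be sketched like this (illustrative only; the example_* helpers stand in for the real mmc core call chain):

static int example_sd_init_card(struct mmc_host *host, struct mmc_card *card)
{
        /* Apply SD quirks before any extension registers are parsed ... */
        mmc_fixup_device(card, mmc_sd_fixups);

        /* ... so a card carrying MMC_QUIRK_BROKEN_SD_CACHE never has the
         * cache feature recorded as supported, and enabling it is skipped. */
        return example_sd_setup_card(host, card);       /* hypothetical remainder of setup */
}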
Fixes: c467c8f08185 ("mmc: Add MMC_QUIRK_BROKEN_SD_CACHE for Kingston Canvas Go Plus from 11/2019") Signed-off-by: Jonathan Bell Co-developed-by: Keita Aihara Signed-off-by: Keita Aihara Reviewed-by: Dragan Simic Reviewed-by: Avri Altman Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240820230631.GA436523@sony.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/quirks.h | 22 +++++++++++++--------- drivers/mmc/core/sd.c | 4 ++++ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index cca71867bc4a..92905fc46436 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -15,6 +15,19 @@ #include "card.h" +static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { + /* + * Kingston Canvas Go! Plus microSD cards never finish SD cache flush. + * This has so far only been observed on cards from 11/2019, while new + * cards from 2023/05 do not exhibit this behavior. + */ + _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11, + 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY), + + END_FIXUP +}; + static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { #define INAND_CMD38_ARG_EXT_CSD 113 #define INAND_CMD38_ARG_ERASE 0x00 @@ -53,15 +66,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), - /* - * Kingston Canvas Go! Plus microSD cards never finish SD cache flush. - * This has so far only been observed on cards from 11/2019, while new - * cards from 2023/05 do not exhibit this behavior. - */ - _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11, - 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, - MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY), - /* * Some SD cards lockup while using CMD23 multiblock transfers. */ diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 1c8148cdda50..ee37ad14e79e 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -26,6 +26,7 @@ #include "host.h" #include "bus.h" #include "mmc_ops.h" +#include "quirks.h" #include "sd.h" #include "sd_ops.h" @@ -1475,6 +1476,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, goto free_card; } + /* Apply quirks prior to card setup */ + mmc_fixup_device(card, mmc_sd_fixups); + err = mmc_sd_setup_card(host, card, oldcard != NULL); if (err) goto free_card; From 5b1ef10f7d49f3320b0faa894204259e590ce588 Mon Sep 17 00:00:00 2001 From: Sam Protsenko Date: Wed, 6 Mar 2024 17:20:52 -0600 Subject: [PATCH 034/374] mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K commit 8396c793ffdf28bb8aee7cfe0891080f8cab7890 upstream. Commit 616f87661792 ("mmc: pass queue_limits to blk_mq_alloc_disk") [1] revealed the long living issue in dw_mmc.c driver, existing since the time when it was first introduced in commit f95f3850f7a9 ("mmc: dw_mmc: Add Synopsys DesignWare mmc host driver."), also making kernel boot broken on platforms using dw_mmc driver with 16K or 64K pages enabled, with this message in dmesg: mmcblk: probe of mmc0:0001 failed with error -22 That's happening because mmc_blk_probe() fails when it calls blk_validate_limits() consequently, which returns the error due to failed max_segment_size check in this code: /* * The maximum segment size has an odd historic 64k default that * drivers probably should override. 
Just like the I/O size we * require drivers to at least handle a full page per segment. */ ... if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE)) return -EINVAL; In case when IDMAC (Internal DMA Controller) is used, dw_mmc.c always sets .max_seg_size to 4 KiB: mmc->max_seg_size = 0x1000; The comment in the code above explains why it's incorrect. Arnd suggested setting .max_seg_size to .max_req_size to fix it, which is also what some other drivers are doing: $ grep -rl 'max_seg_size.*=.*max_req_size' drivers/mmc/host/ | \ wc -l 18 This change is not only fixing the boot with 16K/64K pages, but also leads to a better MMC performance. The linear write performance was tested on E850-96 board (eMMC only), before commit [1] (where it's possible to boot with 16K/64K pages without this fix, to be able to do a comparison). It was tested with this command: # dd if=/dev/zero of=somefile bs=1M count=500 oflag=sync Test results are as follows: - 4K pages, .max_seg_size = 4 KiB: 94.2 MB/s - 4K pages, .max_seg_size = .max_req_size = 512 KiB: 96.9 MB/s - 16K pages, .max_seg_size = 4 KiB: 126 MB/s - 16K pages, .max_seg_size = .max_req_size = 2 MiB: 128 MB/s - 64K pages, .max_seg_size = 4 KiB: 138 MB/s - 64K pages, .max_seg_size = .max_req_size = 8 MiB: 138 MB/s Unfortunately, SD card controller is not enabled in E850-96 yet, so it wasn't possible for me to run the test on some cheap SD cards to check this patch's impact on those. But it's possible that this change might also reduce the writes count, thus improving SD/eMMC longevity. All credit for the analysis and the suggested solution goes to Arnd. [1] https://lore.kernel.org/all/20240215070300.2200308-18-hch@lst.de/ Fixes: f95f3850f7a9 ("mmc: dw_mmc: Add Synopsys DesignWare mmc host driver.") Suggested-by: Arnd Bergmann Reported-by: Linux Kernel Functional Testing Closes: https://lore.kernel.org/all/CA+G9fYtddf2Fd3be+YShHP6CmSDNcn0ptW8qg+stUKW+Cn0rjQ@mail.gmail.com/ Signed-off-by: Sam Protsenko Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240306232052.21317-1-semen.protsenko@linaro.org Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/dw_mmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index a4f813ea177a..c27e9ab9ff82 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2951,8 +2951,8 @@ static int dw_mci_init_slot(struct dw_mci *host) if (host->use_dma == TRANS_MODE_IDMAC) { mmc->max_segs = host->ring_size; mmc->max_blk_size = 65535; - mmc->max_seg_size = 0x1000; - mmc->max_req_size = mmc->max_seg_size * host->ring_size; + mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size; + mmc->max_seg_size = mmc->max_req_size; mmc->max_blk_count = mmc->max_req_size / 512; } else if (host->use_dma == TRANS_MODE_EDMAC) { mmc->max_segs = 64; From 0e8df0942de01b2bdde04b5db280d72ed10b1af7 Mon Sep 17 00:00:00 2001 From: Liao Chen Date: Mon, 26 Aug 2024 12:48:51 +0000 Subject: [PATCH 035/374] mmc: sdhci-of-aspeed: fix module autoloading commit 6e540da4c1db7b840e347c4dfe48359b18b7e376 upstream. Add MODULE_DEVICE_TABLE(), so modules could be properly autoloaded based on the alias from of_device_id table. 
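As general background (not taken from the patch itself): MODULE_DEVICE_TABLE(of, tbl) exports the table's compatible strings as module aliases, which is what lets udev match a device-tree node's MODALIAS uevent and load the module on demand. A minimal, hypothetical driver using the same pattern might look like:

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-sdhci" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);      /* emits the aliases used for autoloading */

static struct platform_driver example_driver = {
        .driver = {
                .name           = "example-sdhci",
                .of_match_table = example_of_match,
        },
        .probe = example_probe,                 /* hypothetical probe function */
};
module_platform_driver(example_driver);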
Signed-off-by: Liao Chen Acked-by: Andrew Jeffery Fixes: bb7b8ec62dfb ("mmc: sdhci-of-aspeed: Add support for the ASPEED SD controller") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240826124851.379759-1-liaochen4@huawei.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-of-aspeed.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 430c1f90037b..37240895ffaa 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -510,6 +510,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = { { .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, }, { } }; +MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match); static struct platform_driver aspeed_sdhci_driver = { .driver = { From 73279b3c312fcbd7e7bd77a46ea881f149b62554 Mon Sep 17 00:00:00 2001 From: Seunghwan Baek Date: Thu, 29 Aug 2024 15:18:22 +0900 Subject: [PATCH 036/374] mmc: cqhci: Fix checking of CQHCI_HALT state commit aea62c744a9ae2a8247c54ec42138405216414da upstream. To check if mmc cqe is in halt state, need to check set/clear of CQHCI_HALT bit. At this time, we need to check with &, not &&. Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host") Cc: stable@vger.kernel.org Signed-off-by: Seunghwan Baek Reviewed-by: Ritesh Harjani Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20240829061823.3718-2-sh8267.baek@samsung.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/cqhci-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c index c14d7251d0bb..a02da26a1efd 100644 --- a/drivers/mmc/host/cqhci-core.c +++ b/drivers/mmc/host/cqhci-core.c @@ -617,7 +617,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) cqhci_writel(cq_host, 0, CQHCI_CTL); mmc->cqe_on = true; pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); - if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) { + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) { pr_err("%s: cqhci: CQE failed to exit halt state\n", mmc_hostname(mmc)); } From 7a0b9c73ec42f64c3a465b296e8434ddd80d29eb Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:04 -0700 Subject: [PATCH 037/374] fuse: update stats for pages in dropped aux writeback list commit f7790d67785302b3116bbbfda62a5a44524601a3 upstream. In the case where the aux writeback list is dropped (e.g. the pages have been truncated or the connection is broken), the stats for its pages and backing device info need to be updated as well. 
Fixes: e2653bd53a98 ("fuse: fix leaked aux requests") Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Cc: # v5.1 Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/file.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f39456c65ed7..ed76121f73f2 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1832,10 +1832,16 @@ __acquires(fi->lock) fuse_writepage_finish(fm, wpa); spin_unlock(&fi->lock); - /* After fuse_writepage_finish() aux request list is private */ + /* After rb_erase() aux request list is private */ for (aux = wpa->next; aux; aux = next) { + struct backing_dev_info *bdi = inode_to_bdi(aux->inode); + next = aux->next; aux->next = NULL; + + dec_wb_stat(&bdi->wb, WB_WRITEBACK); + dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP); + wb_writeout_inc(&bdi->wb); fuse_writepage_free(aux); } From 58020fe8835254e926cc4dd55a1a7f4e7ecd5129 Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Wed, 3 Jul 2024 19:30:20 +0200 Subject: [PATCH 038/374] fuse: disable the combination of passthrough and writeback cache commit 3ab394b363c5fd14b231e335fb6746ddfb93aaaa upstream. Current design and handling of passthrough is without fuse caching and with that FUSE_WRITEBACK_CACHE is conflicting. Fixes: 7dc4e97a4f9a ("fuse: introduce FUSE_PASSTHROUGH capability") Cc: stable@kernel.org # v6.9 Signed-off-by: Bernd Schubert Acked-by: Amir Goldstein Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/inode.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 32fe6fa72f46..57863e773982 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1336,11 +1336,16 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, * on a stacked fs (e.g. overlayfs) themselves and with * max_stack_depth == 1, FUSE fs can be stacked as the * underlying fs of a stacked fs (e.g. overlayfs). + * + * Also don't allow the combination of FUSE_PASSTHROUGH + * and FUSE_WRITEBACK_CACHE, current design doesn't handle + * them together. */ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) && (flags & FUSE_PASSTHROUGH) && arg->max_stack_depth > 0 && - arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH) { + arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH && + !(flags & FUSE_WRITEBACK_CACHE)) { fc->passthrough = 1; fc->max_stack_depth = arg->max_stack_depth; fm->sb->s_stack_depth = arg->max_stack_depth; From 55c0b50148f776d15c8b844e7255a6c258ee9ec2 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 25 Jul 2024 10:53:34 -0700 Subject: [PATCH 039/374] fuse: check aborted connection before adding requests to pending list for resending commit 97f30876c94382d1b01d45c2c76be8911b196527 upstream. There is a race condition where inflight requests will not be aborted if they are in the middle of being re-sent when the connection is aborted. If fuse_resend has already moved all the requests in the fpq->processing lists to its private queue ("to_queue") and then the connection starts and finishes aborting, these requests will be added to the pending queue and remain on it indefinitely. 
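A condensed sketch of the ordering the fix enforces (simplified; the actual change below also clears FR_PENDING on each request before ending it):

spin_lock(&fiq->lock);
if (!fiq->connected) {
        /* Aborted while the requests sat on the private resend list:
         * finish them here rather than parking them on the pending
         * queue of a dead connection. */
        spin_unlock(&fiq->lock);
        end_requests(&to_queue);
        return;
}
/* Still connected while holding fiq->lock: safe to re-queue. */
list_splice(&to_queue, &fiq->pending);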
Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests") Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Reviewed-by: Jingbo Xu Cc: # v6.9 Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/dev.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 7146038b2fe7..cdd4e02b691e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -31,6 +31,8 @@ MODULE_ALIAS("devname:fuse"); static struct kmem_cache *fuse_req_cachep; +static void end_requests(struct list_head *head); + static struct fuse_dev *fuse_get_dev(struct file *file) { /* @@ -1822,6 +1824,13 @@ static void fuse_resend(struct fuse_conn *fc) } spin_lock(&fiq->lock); + if (!fiq->connected) { + spin_unlock(&fiq->lock); + list_for_each_entry(req, &to_queue, list) + clear_bit(FR_PENDING, &req->flags); + end_requests(&to_queue); + return; + } /* iq and pq requests are both oldest to newest */ list_splice(&to_queue, &fiq->pending); fiq->ops->wake_pending_and_unlock(fiq); From c5a56f3000a90ce9e9070671cd794c7d170a4448 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 19 Aug 2024 19:52:30 +0200 Subject: [PATCH 040/374] fuse: use unsigned type for getxattr/listxattr size truncation commit b18915248a15eae7d901262f108d6ff0ffb4ffc1 upstream. The existing code uses min_t(ssize_t, outarg.size, XATTR_LIST_MAX) when parsing the FUSE daemon's response to a zero-length getxattr/listxattr request. On 32-bit kernels, where ssize_t and outarg.size are the same size, this is wrong: The min_t() will pass through any size values that are negative when interpreted as signed. fuse_listxattr() will then return this userspace-supplied negative value, which callers will treat as an error value. This kind of bug pattern can lead to fairly bad security bugs because of how error codes are used in the Linux kernel. If a caller were to convert the numeric error into an error pointer, like so: struct foo *func(...) { int len = fuse_getxattr(..., NULL, 0); if (len < 0) return ERR_PTR(len); ... } then it would end up returning this userspace-supplied negative value cast to a pointer - but the caller of this function wouldn't recognize it as an error pointer (IS_ERR_VALUE() only detects values in the narrow range in which legitimate errno values are), and so it would just be treated as a kernel pointer. I think there is at least one theoretical codepath where this could happen, but that path would involve virtio-fs with submounts plus some weird SELinux configuration, so I think it's probably not a concern in practice. 
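A self-contained illustration of the signedness hazard (userspace demo, not kernel code; it models a 32-bit ssize_t with int32_t and re-creates a min_t-style macro locally):

#include <stdio.h>
#include <stdint.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        uint32_t daemon_size = 0xfffff000u;     /* daemon-supplied outarg.size */

        /* Signed comparison: the huge value becomes negative and "wins" the min. */
        int32_t as_signed = min_t(int32_t, daemon_size, 65536 /* XATTR_LIST_MAX */);
        /* Unsigned comparison: the cap actually applies. */
        uint32_t as_unsigned = min_t(uint32_t, daemon_size, 65536);

        printf("signed min:   %d\n", (int)as_signed);           /* prints -4096 */
        printf("unsigned min: %u\n", (unsigned)as_unsigned);    /* prints 65536 */
        return 0;
}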
Cc: stable@vger.kernel.org # v4.9 Fixes: 63401ccdb2ca ("fuse: limit xattr returned size") Signed-off-by: Jann Horn Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/xattr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 5b423fdbb13f..9f568d345c51 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value, } ret = fuse_simple_request(fm, &args); if (!ret && !size) - ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX); + ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX); if (ret == -ENOSYS) { fm->fc->no_getxattr = 1; ret = -EOPNOTSUPP; @@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) } ret = fuse_simple_request(fm, &args); if (!ret && !size) - ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX); + ret = min_t(size_t, outarg.size, XATTR_LIST_MAX); if (ret > 0 && size) ret = fuse_verify_xattr_list(list, ret); if (ret == -ENOSYS) { From b39cea3a5a93d2d7c6475b7ebf33a5f5cfe6a2ca Mon Sep 17 00:00:00 2001 From: yangyun Date: Fri, 23 Aug 2024 16:51:46 +0800 Subject: [PATCH 041/374] fuse: fix memory leak in fuse_create_open commit 3002240d16494d798add0575e8ba1f284258ab34 upstream. The memory of struct fuse_file is allocated but not freed when get_create_ext return error. Fixes: 3e2b6fdbdc9a ("fuse: send security context of inode on file") Cc: stable@vger.kernel.org # v5.17 Signed-off-by: yangyun Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 2b0d4781f394..8e96df9fd76c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -670,7 +670,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, err = get_create_ext(&args, dir, entry, mode); if (err) - goto out_put_forget_req; + goto out_free_ff; err = fuse_simple_request(fm, &args); free_ext_value(&args); From 2a5f32223e8ffb63904861c63d77af679689fd7e Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 28 Aug 2024 15:55:17 +0200 Subject: [PATCH 042/374] fuse: clear PG_uptodate when using a stolen page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 76a51ac00ca2a72fe3e168b7fb0e70f75ba6f512 upstream. Originally when a stolen page was inserted into fuse's page cache by fuse_try_move_page(), it would be marked uptodate. Then fuse_readpages_end() would call SetPageUptodate() again on the already uptodate page. Commit 413e8f014c8b ("fuse: Convert fuse_readpages_end() to use folio_end_read()") changed that by replacing the SetPageUptodate() + unlock_page() combination with folio_end_read(), which does mostly the same, except it sets the uptodate flag with an xor operation, which in the above scenario resulted in the uptodate flag being cleared, which in turn resulted in EIO being returned on the read. Fix by clearing PG_uptodate instead of setting it in fuse_try_move_page(), conforming to the expectation of folio_end_read(). 
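A stand-alone illustration of the toggle behaviour described above (not kernel code; the bit position is made up): ending the read by xor-ing in the uptodate bit only produces the right result when the bit starts out clear, which is exactly what the fix guarantees.

#include <stdio.h>

#define PG_UPTODATE_DEMO (1u << 2)      /* illustrative flag bit, not the kernel's */

int main(void)
{
        unsigned int was_clear = 0;
        unsigned int was_set = PG_UPTODATE_DEMO;

        was_clear ^= PG_UPTODATE_DEMO;  /* clear -> set: the intended outcome */
        was_set   ^= PG_UPTODATE_DEMO;  /* set -> cleared: the observed regression */

        printf("started clear: uptodate=%d\n", !!(was_clear & PG_UPTODATE_DEMO)); /* 1 */
        printf("started set:   uptodate=%d\n", !!(was_set & PG_UPTODATE_DEMO));   /* 0 */
        return 0;
}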
Reported-by: Jürg Billeter Debugged-by: Matthew Wilcox Fixes: 413e8f014c8b ("fuse: Convert fuse_readpages_end() to use folio_end_read()") Cc: # v6.10 Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/fuse/dev.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index cdd4e02b691e..f0c9cd1a0b39 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -775,7 +775,6 @@ static int fuse_check_folio(struct folio *folio) (folio->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | - 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | 1 << PG_workingset | @@ -820,9 +819,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) newfolio = page_folio(buf->page); - if (!folio_test_uptodate(newfolio)) - folio_mark_uptodate(newfolio); - + folio_clear_uptodate(newfolio); folio_clear_mappedtodisk(newfolio); if (fuse_check_folio(newfolio) != 0) From 85cda5b040bda9c577b34eb72d5b2e5b7e31985c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 23 Aug 2024 09:42:17 +0200 Subject: [PATCH 043/374] ASoC: Intel: Boards: Fix NULL pointer deref in BYT/CHT boards harder commit 0cc65482f5b03ac2b1c240bc34665e43ea2d71bb upstream. Since commit 13f58267cda3 ("ASoC: soc.h: don't create dummy Component via COMP_DUMMY()") dummy codecs declared like this: SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY())); expand to: static struct snd_soc_dai_link_component dummy[] = { }; Which means that dummy is a zero sized array and thus dais[i].codecs should not be dereferenced *at all* since it points to the address of the next variable stored in the data section as the "dummy" variable has an address but no size, so even dereferencing dais[0] is already an out of bounds array reference. Which means that the if (dais[i].codecs->name) check added in commit 7d99a70b6595 ("ASoC: Intel: Boards: Fix NULL pointer deref in BYT/CHT boards") relies on that the part of the next variable which the name member maps to just happens to be NULL. Which apparently so far it usually is, except when it isn't and then it results in crashes like this one: [ 28.795659] BUG: unable to handle page fault for address: 0000000000030011 ... [ 28.795780] Call Trace: [ 28.795787] ... [ 28.795862] ? strcmp+0x18/0x40 [ 28.795872] 0xffffffffc150c605 [ 28.795887] platform_probe+0x40/0xa0 ... [ 28.795979] ? __pfx_init_module+0x10/0x10 [snd_soc_sst_bytcr_wm5102] Really fix things this time around by checking dais.num_codecs != 0. 
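The out-of-bounds read described above is easy to reproduce outside the kernel; this condensed sketch (simplified stand-ins for the ASoC structs, compiles with GNU C because of the zero-length array) shows why num_codecs is the only safe thing to test:

#include <stdio.h>
#include <string.h>

struct component { const char *name; };

struct dai_link {
    struct component *codecs;
    int num_codecs;
};

static struct component dummy_codecs[] = { };  /* what COMP_DUMMY() now expands to */
static struct component codec[] = { { .name = "i2c-INT343A:00" } };

static struct dai_link dais[] = {
    { .codecs = dummy_codecs, .num_codecs = 0 },  /* ->codecs points past the array */
    { .codecs = codec,        .num_codecs = 1 },
};

int main(void)
{
    for (int i = 0; i < 2; i++) {
        /* old code read dais[i].codecs->name even when num_codecs == 0 */
        if (dais[i].num_codecs &&
            !strcmp(dais[i].codecs->name, "i2c-INT343A:00"))
            printf("codec dai at index %d\n", i);
    }
    return 0;
}

For the dummy entry, dais[0].codecs->name dereferences whatever happens to sit behind the zero-sized array, which is exactly the lucky-NULL dependence the crash report exposed; checking num_codecs never touches that memory.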
Fixes: 7d99a70b6595 ("ASoC: Intel: Boards: Fix NULL pointer deref in BYT/CHT boards") Cc: stable@vger.kernel.org Signed-off-by: Hans de Goede Reviewed-by: Pierre-Louis Bossart Link: https://patch.msgid.link/20240823074217.14653-1-hdegoede@redhat.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/intel/boards/bxt_rt298.c | 2 +- sound/soc/intel/boards/bytcht_cx2072x.c | 2 +- sound/soc/intel/boards/bytcht_da7213.c | 2 +- sound/soc/intel/boards/bytcht_es8316.c | 2 +- sound/soc/intel/boards/bytcr_rt5640.c | 2 +- sound/soc/intel/boards/bytcr_rt5651.c | 2 +- sound/soc/intel/boards/bytcr_wm5102.c | 2 +- sound/soc/intel/boards/cht_bsw_rt5645.c | 2 +- sound/soc/intel/boards/cht_bsw_rt5672.c | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c index dce6a2086f2a..6da1517c53c6 100644 --- a/sound/soc/intel/boards/bxt_rt298.c +++ b/sound/soc/intel/boards/bxt_rt298.c @@ -605,7 +605,7 @@ static int broxton_audio_probe(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(broxton_rt298_dais); i++) { - if (card->dai_link[i].codecs->name && + if (card->dai_link[i].num_codecs && !strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00", I2C_NAME_SIZE)) { if (!strncmp(card->name, "broxton-rt298", diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c index c014d85a08b2..df3c2a7b64d2 100644 --- a/sound/soc/intel/boards/bytcht_cx2072x.c +++ b/sound/soc/intel/boards/bytcht_cx2072x.c @@ -241,7 +241,7 @@ static int snd_byt_cht_cx2072x_probe(struct platform_device *pdev) /* fix index of codec dai */ for (i = 0; i < ARRAY_SIZE(byt_cht_cx2072x_dais); i++) { - if (byt_cht_cx2072x_dais[i].codecs->name && + if (byt_cht_cx2072x_dais[i].num_codecs && !strcmp(byt_cht_cx2072x_dais[i].codecs->name, "i2c-14F10720:00")) { dai_index = i; diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c index f4ac3ddd148b..08c598b7e1ee 100644 --- a/sound/soc/intel/boards/bytcht_da7213.c +++ b/sound/soc/intel/boards/bytcht_da7213.c @@ -245,7 +245,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev) /* fix index of codec dai */ for (i = 0; i < ARRAY_SIZE(dailink); i++) { - if (dailink[i].codecs->name && + if (dailink[i].num_codecs && !strcmp(dailink[i].codecs->name, "i2c-DLGS7213:00")) { dai_index = i; break; diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 2fcec2e02bb5..77b91ea4dc32 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -546,7 +546,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) /* fix index of codec dai */ for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) { - if (byt_cht_es8316_dais[i].codecs->name && + if (byt_cht_es8316_dais[i].num_codecs && !strcmp(byt_cht_es8316_dais[i].codecs->name, "i2c-ESSX8316:00")) { dai_index = i; diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index a64d1989e28a..db4a33680d94 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -1677,7 +1677,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev) /* fix index of codec dai */ for (i = 0; i < ARRAY_SIZE(byt_rt5640_dais); i++) { - if (byt_rt5640_dais[i].codecs->name && + if (byt_rt5640_dais[i].num_codecs && !strcmp(byt_rt5640_dais[i].codecs->name, "i2c-10EC5640:00")) { dai_index = i; diff --git 
a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index 80c841b000a3..8514b79f389b 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -910,7 +910,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) /* fix index of codec dai */ for (i = 0; i < ARRAY_SIZE(byt_rt5651_dais); i++) { - if (byt_rt5651_dais[i].codecs->name && + if (byt_rt5651_dais[i].num_codecs && !strcmp(byt_rt5651_dais[i].codecs->name, "i2c-10EC5651:00")) { dai_index = i; diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c index cccb5e90c0fe..e5a7cc606aa9 100644 --- a/sound/soc/intel/boards/bytcr_wm5102.c +++ b/sound/soc/intel/boards/bytcr_wm5102.c @@ -605,7 +605,7 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev) /* find index of codec dai */ for (i = 0; i < ARRAY_SIZE(byt_wm5102_dais); i++) { - if (byt_wm5102_dais[i].codecs->name && + if (byt_wm5102_dais[i].num_codecs && !strcmp(byt_wm5102_dais[i].codecs->name, "wm5102-codec")) { dai_index = i; diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c index eb41b7115d01..1da9ceee4d59 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5645.c +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c @@ -569,7 +569,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev) /* set correct codec name */ for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) - if (cht_dailink[i].codecs->name && + if (cht_dailink[i].num_codecs && !strcmp(cht_dailink[i].codecs->name, "i2c-10EC5645:00")) { dai_index = i; diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c index be2d1a8dbca8..d68e5bc755de 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5672.c +++ b/sound/soc/intel/boards/cht_bsw_rt5672.c @@ -466,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev) /* find index of codec dai */ for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) { - if (cht_dailink[i].codecs->name && + if (cht_dailink[i].num_codecs && !strcmp(cht_dailink[i].codecs->name, RT5672_I2C_DEFAULT)) { dai_index = i; break; From a3b6ff6c896aee5ef9b581e40d0045ff04fcbc8c Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 17:57:03 -0700 Subject: [PATCH 044/374] riscv: misaligned: Restrict user access to kernel memory commit b686ecdeacf6658e1348c1a32a08e2e72f7c0f00 upstream. raw_copy_{to,from}_user() do not call access_ok(), so this code allowed userspace to access any virtual memory address. 
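A toy model of the missing check (not the real uaccess code; TASK_SIZE, the helpers and the 64-bit addresses are simplified assumptions) makes the distinction between the raw and the checked copy explicit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TASK_SIZE 0x4000000000ULL  /* illustrative user/kernel split */

static int access_ok(uintptr_t addr, size_t len)
{
    return addr + len >= addr && addr + len <= TASK_SIZE;
}

/* raw_copy_from_user(): copies blindly, trusting the caller checked the range */
static int raw_copy_from_user(void *dst, uintptr_t src, size_t len)
{
    memcpy(dst, (const void *)src, len);
    return 0;
}

/* copy_from_user(): the same copy, preceded by the range check */
static int copy_from_user(void *dst, uintptr_t src, size_t len)
{
    if (!access_ok(src, len))
        return (int)len;  /* reject, nothing copied */
    return raw_copy_from_user(dst, src, len);
}

int main(void)
{
    uint64_t val = 0;
    uintptr_t kernel_addr = 0xffffffff80000000ULL;  /* "user" pointer aimed at the kernel */

    printf("%s\n", copy_from_user(&val, kernel_addr, sizeof(val)) ? "rejected" : "copied");
    return 0;
}

In the misaligned-trap handler the address is taken straight from the faulting userspace instruction, so only the checked variants are appropriate there.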
Cc: stable@vger.kernel.org Fixes: 7c83232161f6 ("riscv: add support for misaligned trap handling in S-mode") Fixes: 441381506ba7 ("riscv: misaligned: remove CONFIG_RISCV_M_MODE specific code") Signed-off-by: Samuel Holland Reviewed-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20240815005714.1163136-1-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt Signed-off-by: Greg Kroah-Hartman --- arch/riscv/kernel/traps_misaligned.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index b62d5a2f4541..1a76f99ff185 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -417,7 +417,7 @@ int handle_misaligned_load(struct pt_regs *regs) val.data_u64 = 0; if (user_mode(regs)) { - if (raw_copy_from_user(&val, (u8 __user *)addr, len)) + if (copy_from_user(&val, (u8 __user *)addr, len)) return -1; } else { memcpy(&val, (u8 *)addr, len); @@ -515,7 +515,7 @@ int handle_misaligned_store(struct pt_regs *regs) return -EOPNOTSUPP; if (user_mode(regs)) { - if (raw_copy_to_user((u8 __user *)addr, &val, len)) + if (copy_to_user((u8 __user *)addr, &val, len)) return -1; } else { memcpy((u8 *)addr, &val, len); From c37c00cf3d03446d50318ba7487bf3f4a323ddd9 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 31 Aug 2024 14:02:06 +0200 Subject: [PATCH 045/374] parisc: Delay write-protection until mark_rodata_ro() call commit 213aa670153ed675a007c1f35c5db544b0fefc94 upstream. Do not write-protect the kernel read-only and __ro_after_init sections earlier than before mark_rodata_ro() is called. This fixes a boot issue on parisc which is triggered by commit 91a1d97ef482 ("jump_label,module: Don't alloc static_key_mod for __ro_after_init keys"). That commit may modify static key contents in the __ro_after_init section at bootup, so this section needs to be writable at least until mark_rodata_ro() is called. Signed-off-by: Helge Deller Reported-by: matoro Reported-by: Christoph Biedl Tested-by: Christoph Biedl Link: https://lore.kernel.org/linux-parisc/096cad5aada514255cd7b0b9dbafc768@matoro.tk/#r Fixes: 91a1d97ef482 ("jump_label,module: Don't alloc static_key_mod for __ro_after_init keys") Cc: stable@vger.kernel.org # v6.10+ Signed-off-by: Greg Kroah-Hartman --- arch/parisc/mm/init.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 34d91cb8b259..96970fa75e4a 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -459,7 +459,6 @@ void free_initmem(void) unsigned long kernel_end = (unsigned long)&_end; /* Remap kernel text and data, but do not touch init section yet. */ - kernel_set_to_readonly = true; map_pages(init_end, __pa(init_end), kernel_end - init_end, PAGE_KERNEL, 0); @@ -493,11 +492,18 @@ void free_initmem(void) #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { - /* rodata memory was already mapped with KERNEL_RO access rights by - pagetable_init() and map_pages(). 
No need to do additional stuff here */ - unsigned long roai_size = __end_ro_after_init - __start_ro_after_init; + unsigned long start = (unsigned long) &__start_rodata; + unsigned long end = (unsigned long) &__end_rodata; + + pr_info("Write protecting the kernel read-only data: %luk\n", + (end - start) >> 10); + + kernel_set_to_readonly = true; + map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0); - pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10); + /* force the kernel to see the new page table entries */ + flush_cache_all(); + flush_tlb_all(); } #endif From d81ef42faf969e7d401a914e01dfeafb583f6077 Mon Sep 17 00:00:00 2001 From: Xingyu Wu Date: Mon, 26 Aug 2024 16:04:29 +0800 Subject: [PATCH 046/374] clk: starfive: jh7110-sys: Add notifier for PLL0 clock commit 538d5477b25289ac5d46ca37b9e5b4d685cbe019 upstream. Add notifier function for PLL0 clock. In the function, the cpu_root clock should be operated by saving its current parent and setting a new safe parent (osc clock) before setting the PLL0 clock rate. After setting PLL0 rate, it should be switched back to the original parent clock. Fixes: e2c510d6d630 ("riscv: dts: starfive: Add cpu scaling for JH7110 SoC") Cc: stable@vger.kernel.org Reviewed-by: Emil Renner Berthing Signed-off-by: Xingyu Wu Link: https://lore.kernel.org/r/20240826080430.179788-2-xingyu.wu@starfivetech.com Reviewed-by: Hal Feng Tested-by: Michael Jeanson Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- .../clk/starfive/clk-starfive-jh7110-sys.c | 31 ++++++++++++++++++- drivers/clk/starfive/clk-starfive-jh71x0.h | 2 ++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c index 8f5e5abfa178..17325f17696f 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c @@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv, } EXPORT_SYMBOL_GPL(jh7110_reset_controller_register); +/* + * This clock notifier is called when the rate of PLL0 clock is to be changed. + * The cpu_root clock should save the curent parent clock and switch its parent + * clock to osc before PLL0 rate will be changed. Then switch its parent clock + * back after the PLL0 rate is completed. 
+ */ +static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb); + struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk; + int ret = 0; + + if (action == PRE_RATE_CHANGE) { + struct clk *osc = clk_get(priv->dev, "osc"); + + priv->original_clk = clk_get_parent(cpu_root); + ret = clk_set_parent(cpu_root, osc); + clk_put(osc); + } else if (action == POST_RATE_CHANGE) { + ret = clk_set_parent(cpu_root, priv->original_clk); + } + + return notifier_from_errno(ret); +} + static int __init jh7110_syscrg_probe(struct platform_device *pdev) { struct jh71x0_clk_priv *priv; @@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev) if (IS_ERR(priv->pll[0])) return PTR_ERR(priv->pll[0]); } else { - clk_put(pllclk); + priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb; + ret = clk_notifier_register(pllclk, &priv->pll_clk_nb); + if (ret) + return ret; priv->pll[0] = NULL; } diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h index 23e052fc1549..e3f441393e48 100644 --- a/drivers/clk/starfive/clk-starfive-jh71x0.h +++ b/drivers/clk/starfive/clk-starfive-jh71x0.h @@ -114,6 +114,8 @@ struct jh71x0_clk_priv { spinlock_t rmw_lock; struct device *dev; void __iomem *base; + struct clk *original_clk; + struct notifier_block pll_clk_nb; struct clk_hw *pll[3]; struct jh71x0_clk reg[]; }; From 382ef041d60ec1c265fde7aceaf86136c8fe4021 Mon Sep 17 00:00:00 2001 From: Satya Priya Kakitapalli Date: Wed, 31 Jul 2024 11:59:09 +0530 Subject: [PATCH 047/374] clk: qcom: clk-alpha-pll: Fix the pll post div mask commit 2c4553e6c485a96b5d86989eb9654bf20e51e6dd upstream. The PLL_POST_DIV_MASK should be 0 to (width - 1) bits. Fix it. Fixes: 1c3541145cbf ("clk: qcom: support for 2 bit PLL post divider") Cc: stable@vger.kernel.org Reviewed-by: Konrad Dybcio Signed-off-by: Satya Priya Kakitapalli Link: https://lore.kernel.org/r/20240731062916.2680823-2-quic_skakitap@quicinc.com Signed-off-by: Bjorn Andersson Signed-off-by: Greg Kroah-Hartman --- drivers/clk/qcom/clk-alpha-pll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index c51647e37df8..c9535f143a35 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -40,7 +40,7 @@ #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL]) # define PLL_POST_DIV_SHIFT 8 -# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0) +# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0) # define PLL_ALPHA_EN BIT(24) # define PLL_ALPHA_MODE BIT(25) # define PLL_VCO_SHIFT 20 From be495259a08e79704f07c9b873535213e5541957 Mon Sep 17 00:00:00 2001 From: Satya Priya Kakitapalli Date: Wed, 31 Jul 2024 11:59:10 +0530 Subject: [PATCH 048/374] clk: qcom: clk-alpha-pll: Fix the trion pll postdiv set rate API commit 4ad1ed6ef27cab94888bb3c740c14042d5c0dff2 upstream. Correct the pll postdiv shift used in clk_trion_pll_postdiv_set_rate API. The shift value is not same for different types of plls and should be taken from the pll's .post_div_shift member. 
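Both post-divider fixes boil down to using the field's own geometry (its width and its shift) rather than hard-coded values; the mask half of that is worth seeing in isolation (standalone sketch with a simplified 32-bit GENMASK, not the kernel macro):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
    unsigned int width = 2;  /* a 2-bit post divider */

    printf("GENMASK(width, 0)     = 0x%x\n", GENMASK(width, 0));      /* 0x7: one bit too wide */
    printf("GENMASK(width - 1, 0) = 0x%x\n", GENMASK(width - 1, 0));  /* 0x3: matches the field */
    return 0;
}

An over-wide mask silently read-modify-writes the neighbouring bit in PLL_USER_CTL, just as the fixed shift in the Trion set_rate path keeps the divider value from landing in the wrong bit position.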
Fixes: 548a909597d5 ("clk: qcom: clk-alpha-pll: Add support for Trion PLLs") Cc: stable@vger.kernel.org Signed-off-by: Satya Priya Kakitapalli Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20240731062916.2680823-3-quic_skakitap@quicinc.com Signed-off-by: Bjorn Andersson Signed-off-by: Greg Kroah-Hartman --- drivers/clk/qcom/clk-alpha-pll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index c9535f143a35..25a7b4b15c56 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1505,8 +1505,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, } return regmap_update_bits(regmap, PLL_USER_CTL(pll), - PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT, - val << PLL_POST_DIV_SHIFT); + PLL_POST_DIV_MASK(pll) << pll->post_div_shift, + val << pll->post_div_shift); } const struct clk_ops clk_alpha_pll_postdiv_trion_ops = { From 1e8a6948bdbddaaccad709bd800b7dcc63088376 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Fri, 30 Aug 2024 11:09:07 +0200 Subject: [PATCH 049/374] pinctrl: qcom: x1e80100: Bypass PDC wakeup parent for now commit 602cb14e310a7a32c4f27d1f16c4614c790c7f6f upstream. On X1E80100, GPIO interrupts for wakeup-capable pins have been broken since the introduction of the pinctrl driver. This prevents keyboard and touchpad from working on most of the X1E laptops. So far we have worked around this by manually building a kernel with the "wakeup-parent" removed from the pinctrl node in the device tree, but we cannot expect all users to do that. Implement a similar workaround in the driver by clearing the wakeirq_map for X1E80100. This avoids using the PDC wakeup parent for all GPIOs and handles the interrupts directly in the pinctrl driver instead. The PDC driver needs additional changes to support X1E80100 properly. Adding a workaround separately first allows to land the necessary PDC changes through the normal release cycle, while still solving the more critical problem with keyboard and touchpad on the current stable kernel versions. Bypassing the PDC is enough for now, because we have not yet enabled the deep idle states where using the PDC becomes necessary. 
Cc: stable@vger.kernel.org Fixes: 05e4941d97ef ("pinctrl: qcom: Add X1E80100 pinctrl driver") Signed-off-by: Stephan Gerhold Reviewed-by: Johan Hovold Tested-by: Johan Hovold Reviewed-by: Konrad Dybcio Reviewed-by: Abel Vesa Link: https://lore.kernel.org/20240830-x1e80100-bypass-pdc-v1-1-d4c00be0c3e3@linaro.org Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/qcom/pinctrl-x1e80100.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c index 65ed933f05ce..abfcdd3da9e8 100644 --- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c +++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c @@ -1839,7 +1839,9 @@ static const struct msm_pinctrl_soc_data x1e80100_pinctrl = { .ngroups = ARRAY_SIZE(x1e80100_groups), .ngpios = 239, .wakeirq_map = x1e80100_pdc_map, - .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map), + /* TODO: Enabling PDC currently breaks GPIO interrupts */ + .nwakeirq_map = 0, + /* .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map), */ .egpio_func = 9, }; From e554113a1cd2a9cfc6c7af7bdea2141c5757e188 Mon Sep 17 00:00:00 2001 From: Simon Arlott Date: Thu, 22 Aug 2024 08:25:07 +0100 Subject: [PATCH 050/374] can: mcp251x: fix deadlock if an interrupt occurs during mcp251x_open commit 7dd9c26bd6cf679bcfdef01a8659791aa6487a29 upstream. The mcp251x_hw_wake() function is called with the mpc_lock mutex held and disables the interrupt handler so that no interrupts can be processed while waking the device. If an interrupt has already occurred then waiting for the interrupt handler to complete will deadlock because it will be trying to acquire the same mutex. CPU0 CPU1 ---- ---- mcp251x_open() mutex_lock(&priv->mcp_lock) request_threaded_irq() mcp251x_can_ist() mutex_lock(&priv->mcp_lock) mcp251x_hw_wake() disable_irq() <-- deadlock Use disable_irq_nosync() instead because the interrupt handler does everything while holding the mutex so it doesn't matter if it's still running. Fixes: 8ce8c0abcba3 ("can: mcp251x: only reset hardware as required") Signed-off-by: Simon Arlott Reviewed-by: Przemek Kitszel Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/4fc08687-1d80-43fe-9f0d-8ef8475e75f6@0882a8b5-c6c3-11e9-b005-00805fc181fe.uuid.home.arpa Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/spi/mcp251x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 79c4bab5f724..8c56f85e87c1 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -753,7 +753,7 @@ static int mcp251x_hw_wake(struct spi_device *spi) int ret; /* Force wakeup interrupt to wake device, but don't execute IST */ - disable_irq(spi->irq); + disable_irq_nosync(spi->irq); mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF); /* Wait for oscillator startup timer after wake up */ From 1e96ae38257cd6ce2ca56df3dc7a65aa3a92529a Mon Sep 17 00:00:00 2001 From: Adrian Huang Date: Thu, 29 Aug 2024 21:06:33 +0800 Subject: [PATCH 051/374] mm: vmalloc: optimize vmap_lazy_nr arithmetic when purging each vmap_area commit 409faf8c97d5abb0597ea43e99c8b3dd8dbe99e3 upstream. When running the vmalloc stress on a 448-core system, observe the average latency of purge_vmap_node() is about 2 seconds by using the eBPF/bcc 'funclatency.py' tool [1]. # /your-git-repo/bcc/tools/funclatency.py -u purge_vmap_node & pid1=$! 
&& sleep 8 && modprobe test_vmalloc nr_threads=$(nproc) run_test_mask=0x7; kill -SIGINT $pid1 usecs : count distribution 0 -> 1 : 0 | | 2 -> 3 : 29 | | 4 -> 7 : 19 | | 8 -> 15 : 56 | | 16 -> 31 : 483 |**** | 32 -> 63 : 1548 |************ | 64 -> 127 : 2634 |********************* | 128 -> 255 : 2535 |********************* | 256 -> 511 : 1776 |************** | 512 -> 1023 : 1015 |******** | 1024 -> 2047 : 573 |**** | 2048 -> 4095 : 488 |**** | 4096 -> 8191 : 1091 |********* | 8192 -> 16383 : 3078 |************************* | 16384 -> 32767 : 4821 |****************************************| 32768 -> 65535 : 3318 |*************************** | 65536 -> 131071 : 1718 |************** | 131072 -> 262143 : 2220 |****************** | 262144 -> 524287 : 1147 |********* | 524288 -> 1048575 : 1179 |********* | 1048576 -> 2097151 : 822 |****** | 2097152 -> 4194303 : 906 |******* | 4194304 -> 8388607 : 2148 |***************** | 8388608 -> 16777215 : 4497 |************************************* | 16777216 -> 33554431 : 289 |** | avg = 2041714 usecs, total: 78381401772 usecs, count: 38390 The worst case is over 16-33 seconds, so soft lockup is triggered [2]. [Root Cause] 1) Each purge_list has the long list. The following shows the number of vmap_area is purged. crash> p vmap_nodes vmap_nodes = $27 = (struct vmap_node *) 0xff2de5a900100000 crash> vmap_node 0xff2de5a900100000 128 | grep nr_purged nr_purged = 663070 ... nr_purged = 821670 nr_purged = 692214 nr_purged = 726808 ... 2) atomic_long_sub() employs the 'lock' prefix to ensure the atomic operation when purging each vmap_area. However, the iteration is over 600000 vmap_area (See 'nr_purged' above). Here is objdump output: $ objdump -D vmlinux ffffffff813e8c80 : ... ffffffff813e8d70: f0 48 29 2d 68 0c bb lock sub %rbp,0x2bb0c68(%rip) ... Quote from "Instruction tables" pdf file [3]: Instructions with a LOCK prefix have a long latency that depends on cache organization and possibly RAM speed. If there are multiple processors or cores or direct memory access (DMA) devices, then all locked instructions will lock a cache line for exclusive access, which may involve RAM access. A LOCK prefix typically costs more than a hundred clock cycles, even on single-processor systems. That's why the latency of purge_vmap_node() dramatically increases on a many-core system: One core is busy on purging each vmap_area of the *long* purge_list and executing atomic_long_sub() for each vmap_area, while other cores free vmalloc allocations and execute atomic_long_add_return() in free_vmap_area_noflush(). [Solution] Employ a local variable to record the total purged pages, and execute atomic_long_sub() after the traversal of the purge_list is done. The experiment result shows the latency improvement is 99%. [Experiment Result] 1) System Configuration: Three servers (with HT-enabled) are tested. 
* 72-core server: 3rd Gen Intel Xeon Scalable Processor*1 * 192-core server: 5th Gen Intel Xeon Scalable Processor*2 * 448-core server: AMD Zen 4 Processor*2 2) Kernel Config * CONFIG_KASAN is disabled 3) The data in column "w/o patch" and "w/ patch" * Unit: micro seconds (us) * Each data is the average of 3-time measurements System w/o patch (us) w/ patch (us) Improvement (%) --------------- -------------- ------------- ------------- 72-core server 2194 14 99.36% 192-core server 143799 1139 99.21% 448-core server 1992122 6883 99.65% [1] https://github.com/iovisor/bcc/blob/master/tools/funclatency.py [2] https://gist.github.com/AdrianHuang/37c15f67b45407b83c2d32f918656c12 [3] https://www.agner.org/optimize/instruction_tables.pdf Link: https://lkml.kernel.org/r/20240829130633.2184-1-ahuang12@lenovo.com Signed-off-by: Adrian Huang Reviewed-by: Uladzislau Rezki (Sony) Cc: Christoph Hellwig Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/vmalloc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 881e497137e5..9816ee942c7e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2190,6 +2190,7 @@ static void purge_vmap_node(struct work_struct *work) { struct vmap_node *vn = container_of(work, struct vmap_node, purge_work); + unsigned long nr_purged_pages = 0; struct vmap_area *va, *n_va; LIST_HEAD(local_list); @@ -2207,7 +2208,7 @@ static void purge_vmap_node(struct work_struct *work) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); - atomic_long_sub(nr, &vmap_lazy_nr); + nr_purged_pages += nr; vn->nr_purged++; if (is_vn_id_valid(vn_id) && !vn->skip_populate) @@ -2218,6 +2219,8 @@ static void purge_vmap_node(struct work_struct *work) list_add(&va->list, &local_list); } + atomic_long_sub(nr_purged_pages, &vmap_lazy_nr); + reclaim_list_global(&local_list); } From 77e240511603519e5e19667803c5f87c7ee8fb3e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 28 Aug 2024 16:15:36 -0700 Subject: [PATCH 052/374] alloc_tag: fix allocation tag reporting when CONFIG_MODULES=n commit 052a45c1cb1b32f05dd63a295d65496d8b403283 upstream. codetag_module_init() is used to initialize sections containing allocation tags. This function is used to initialize module sections as well as core kernel sections, in which case the module parameter is set to NULL. This function has to be called even when CONFIG_MODULES=n to initialize core kernel allocation tag sections. When CONFIG_MODULES=n, this function is a NOP, which is wrong. This leads to /proc/allocinfo reported as empty. Fix this by making it independent of CONFIG_MODULES. 
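The shape of the fix is worth spelling out, since the diff mostly moves code: the section-init path is built unconditionally and only the module-name lookup stays conditional. A condensed sketch (not the kernel code, names trimmed):

#include <stdio.h>

struct module { const char *name; };

static const char *get_mod_name(struct module *mod)
{
#ifdef CONFIG_MODULES
    if (mod)
        return mod->name;
#endif
    return "(built-in)";
}

/* Called with mod == NULL for core kernel sections, so it must be built even
 * when CONFIG_MODULES=n; previously the whole function was an empty stub. */
static int codetag_module_init(struct module *mod)
{
    printf("loading allocation tags for %s\n", get_mod_name(mod));
    return 0;
}

int main(void)
{
    return codetag_module_init(NULL);  /* the core-kernel call that used to be a no-op */
}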
Link: https://lkml.kernel.org/r/20240828231536.1770519-1-surenb@google.com Fixes: 916cc5167cc6 ("lib: code tagging framework") Signed-off-by: Suren Baghdasaryan Cc: David Hildenbrand Cc: Kees Cook Cc: Kent Overstreet Cc: Pasha Tatashin Cc: Sourav Panda Cc: Vlastimil Babka Cc: [6.10+] Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/codetag.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/codetag.c b/lib/codetag.c index 5ace625f2328..afa8a2d4f317 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -125,7 +125,6 @@ static inline size_t range_size(const struct codetag_type *cttype, cttype->desc.tag_size; } -#ifdef CONFIG_MODULES static void *get_symbol(struct module *mod, const char *prefix, const char *name) { DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN); @@ -155,6 +154,15 @@ static struct codetag_range get_section_range(struct module *mod, }; } +static const char *get_mod_name(__maybe_unused struct module *mod) +{ +#ifdef CONFIG_MODULES + if (mod) + return mod->name; +#endif + return "(built-in)"; +} + static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { struct codetag_range range; @@ -164,8 +172,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) range = get_section_range(mod, cttype->desc.section); if (!range.start || !range.stop) { pr_warn("Failed to load code tags of type %s from the module %s\n", - cttype->desc.section, - mod ? mod->name : "(built-in)"); + cttype->desc.section, get_mod_name(mod)); return -EINVAL; } @@ -199,6 +206,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) return 0; } +#ifdef CONFIG_MODULES void codetag_load_module(struct module *mod) { struct codetag_type *cttype; @@ -248,9 +256,6 @@ bool codetag_unload_module(struct module *mod) return unload_ok; } - -#else /* CONFIG_MODULES */ -static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; } #endif /* CONFIG_MODULES */ struct codetag_type * From 16ad36c8e66a26626e7d0224100b433483a2acef Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Mon, 26 Aug 2024 00:36:49 +0800 Subject: [PATCH 053/374] codetag: debug: mark codetags for poisoned page as empty commit 5e9784e997620af7c1399029282f5d6964b41942 upstream. When PG_hwpoison pages are freed they are treated differently in free_pages_prepare() and instead of being released they are isolated. Page allocation tag counters are decremented at this point since the page is considered not in use. 
Later on when such pages are released by unpoison_memory(), the allocation tag counters will be decremented again and the following warning gets reported: [ 113.930443][ T3282] ------------[ cut here ]------------ [ 113.931105][ T3282] alloc_tag was not set [ 113.931576][ T3282] WARNING: CPU: 2 PID: 3282 at ./include/linux/alloc_tag.h:130 pgalloc_tag_sub.part.66+0x154/0x164 [ 113.932866][ T3282] Modules linked in: hwpoison_inject fuse ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 xt_conntrack ebtable_nat ebtable_broute ip6table_nat ip6table_man4 [ 113.941638][ T3282] CPU: 2 UID: 0 PID: 3282 Comm: madvise11 Kdump: loaded Tainted: G W 6.11.0-rc4-dirty #18 [ 113.943003][ T3282] Tainted: [W]=WARN [ 113.943453][ T3282] Hardware name: QEMU KVM Virtual Machine, BIOS unknown 2/2/2022 [ 113.944378][ T3282] pstate: 40400005 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 113.945319][ T3282] pc : pgalloc_tag_sub.part.66+0x154/0x164 [ 113.946016][ T3282] lr : pgalloc_tag_sub.part.66+0x154/0x164 [ 113.946706][ T3282] sp : ffff800087093a10 [ 113.947197][ T3282] x29: ffff800087093a10 x28: ffff0000d7a9d400 x27: ffff80008249f0a0 [ 113.948165][ T3282] x26: 0000000000000000 x25: ffff80008249f2b0 x24: 0000000000000000 [ 113.949134][ T3282] x23: 0000000000000001 x22: 0000000000000001 x21: 0000000000000000 [ 113.950597][ T3282] x20: ffff0000c08fcad8 x19: ffff80008251e000 x18: ffffffffffffffff [ 113.952207][ T3282] x17: 0000000000000000 x16: 0000000000000000 x15: ffff800081746210 [ 113.953161][ T3282] x14: 0000000000000000 x13: 205d323832335420 x12: 5b5d353031313339 [ 113.954120][ T3282] x11: ffff800087093500 x10: 000000000000005d x9 : 00000000ffffffd0 [ 113.955078][ T3282] x8 : 7f7f7f7f7f7f7f7f x7 : ffff80008236ba90 x6 : c0000000ffff7fff [ 113.956036][ T3282] x5 : ffff000b34bf4dc8 x4 : ffff8000820aba90 x3 : 0000000000000001 [ 113.956994][ T3282] x2 : ffff800ab320f000 x1 : 841d1e35ac932e00 x0 : 0000000000000000 [ 113.957962][ T3282] Call trace: [ 113.958350][ T3282] pgalloc_tag_sub.part.66+0x154/0x164 [ 113.959000][ T3282] pgalloc_tag_sub+0x14/0x1c [ 113.959539][ T3282] free_unref_page+0xf4/0x4b8 [ 113.960096][ T3282] __folio_put+0xd4/0x120 [ 113.960614][ T3282] folio_put+0x24/0x50 [ 113.961103][ T3282] unpoison_memory+0x4f0/0x5b0 [ 113.961678][ T3282] hwpoison_unpoison+0x30/0x48 [hwpoison_inject] [ 113.962436][ T3282] simple_attr_write_xsigned.isra.34+0xec/0x1cc [ 113.963183][ T3282] simple_attr_write+0x38/0x48 [ 113.963750][ T3282] debugfs_attr_write+0x54/0x80 [ 113.964330][ T3282] full_proxy_write+0x68/0x98 [ 113.964880][ T3282] vfs_write+0xdc/0x4d0 [ 113.965372][ T3282] ksys_write+0x78/0x100 [ 113.965875][ T3282] __arm64_sys_write+0x24/0x30 [ 113.966440][ T3282] invoke_syscall+0x7c/0x104 [ 113.966984][ T3282] el0_svc_common.constprop.1+0x88/0x104 [ 113.967652][ T3282] do_el0_svc+0x2c/0x38 [ 113.968893][ T3282] el0_svc+0x3c/0x1b8 [ 113.969379][ T3282] el0t_64_sync_handler+0x98/0xbc [ 113.969980][ T3282] el0t_64_sync+0x19c/0x1a0 [ 113.970511][ T3282] ---[ end trace 0000000000000000 ]--- To fix this, clear the page tag reference after the page got isolated and accounted for. 
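Stripped of the page-flag machinery, the double decrement and the effect of the added clear_page_tag_ref() call look like this (toy model only; CODETAG_EMPTY here is a stand-in sentinel, not the kernel's representation):

#include <stdio.h>

struct alloc_tag { long counter; };
struct page_ref { struct alloc_tag *tag; };

#define CODETAG_EMPTY ((struct alloc_tag *)1)  /* "accounted, nothing left to do" */

static void pgalloc_tag_sub(struct page_ref *ref)
{
    if (ref->tag == CODETAG_EMPTY) {  /* marked empty: silently skip */
        ref->tag = NULL;
        return;
    }
    if (!ref->tag) {
        printf("WARNING: alloc_tag was not set\n");  /* the reported splat */
        return;
    }
    ref->tag->counter--;
    ref->tag = NULL;
}

static void clear_page_tag_ref(struct page_ref *ref)
{
    ref->tag = CODETAG_EMPTY;  /* what the fix adds once the page is isolated */
}

int main(void)
{
    struct alloc_tag tag = { .counter = 1 };
    struct page_ref page = { .tag = &tag };

    pgalloc_tag_sub(&page);     /* free_pages_prepare(): hwpoison page isolated */
    clear_page_tag_ref(&page);  /* the fix; drop this line to reproduce the warning */
    pgalloc_tag_sub(&page);     /* unpoison_memory() releases the page again */

    printf("counter = %ld\n", tag.counter);
    return 0;
}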
Link: https://lkml.kernel.org/r/20240825163649.33294-1-hao.ge@linux.dev Fixes: d224eb0287fb ("codetag: debug: mark codetags for reserved pages as empty") Signed-off-by: Hao Ge Reviewed-by: Miaohe Lin Acked-by: Suren Baghdasaryan Cc: David Hildenbrand Cc: Hao Ge Cc: Kent Overstreet Cc: Naoya Horiguchi Cc: Pasha Tatashin Cc: [6.10+] Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b50060405d94..21016573f187 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1053,6 +1053,13 @@ __always_inline bool free_pages_prepare(struct page *page, reset_page_owner(page, order); page_table_check_free(page, order); pgalloc_tag_sub(page, 1 << order); + + /* + * The page is isolated and accounted for. + * Mark the codetag as empty to avoid accounting error + * when the page is freed by unpoison_memory(). + */ + clear_page_tag_ref(page); return false; } From bd4c713e0652b2a32b15b46096afc29116cb6bd5 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 20 Aug 2024 13:54:17 -0400 Subject: [PATCH 054/374] maple_tree: remove rcu_read_lock() from mt_validate() commit f806de88d8f7f8191afd0fd9b94db4cd058e7d4f upstream. The write lock should be held when validating the tree to avoid updates racing with checks. Holding the rcu read lock during a large tree validation may also cause a prolonged rcu read window and "rcu_preempt detected stalls" warnings. Link: https://lore.kernel.org/all/0000000000001d12d4062005aea1@google.com/ Link: https://lkml.kernel.org/r/20240820175417.2782532-1-Liam.Howlett@oracle.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Liam R. Howlett Reported-by: syzbot+036af2f0c7338a33b0cd@syzkaller.appspotmail.com Cc: Hillf Danton Cc: Matthew Wilcox Cc: "Paul E. McKenney" Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/maple_tree.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 2d7d27e6ae3c..f5ae38fd6696 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -7569,14 +7569,14 @@ static void mt_validate_nulls(struct maple_tree *mt) * 2. The gap is correctly set in the parents */ void mt_validate(struct maple_tree *mt) + __must_hold(mas->tree->ma_lock) { unsigned char end; MA_STATE(mas, mt, 0, 0); - rcu_read_lock(); mas_start(&mas); if (!mas_is_active(&mas)) - goto done; + return; while (!mte_is_leaf(mas.node)) mas_descend(&mas); @@ -7597,9 +7597,6 @@ void mt_validate(struct maple_tree *mt) mas_dfs_postorder(&mas, ULONG_MAX); } mt_validate_nulls(mt); -done: - rcu_read_unlock(); - } EXPORT_SYMBOL_GPL(mt_validate); From 55c39bb701ba64b85b0cbb7b55da961db5ea5d72 Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Mon, 5 Aug 2024 17:07:50 +0200 Subject: [PATCH 055/374] kexec_file: fix elfcorehdr digest exclusion when CONFIG_CRASH_HOTPLUG=y commit 6dacd79d28842ff01f18b4900d897741aac5999e upstream. Fix the condition to exclude the elfcorehdr segment from the SHA digest calculation. The j iterator is an index into the output sha_regions[] array, not into the input image->segment[] array. Once it reaches image->elfcorehdr_index, all subsequent segments are excluded. Besides, if the purgatory segment precedes the elfcorehdr segment, the elfcorehdr may be wrongly included in the calculation. 
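The bug class is the classic two-index mix-up; a reduced standalone loop (segment names invented for illustration) shows why the exclusion test has to compare i, the input segment index, rather than j, the output region index:

#include <stdio.h>

int main(void)
{
    const char *segment[] = { "kernel", "purgatory", "elfcorehdr", "initrd" };
    int elfcorehdr_index = 2;  /* index in segment[], i.e. in image->segment[] */
    int j = 0;                 /* index into sha_regions[], advances only when hashing */

    for (int i = 0; i < 4; i++) {
        if (i == elfcorehdr_index)  /* the buggy code compared j here */
            continue;
        printf("sha_regions[%d] <- %s\n", j, segment[i]);
        j++;
    }
    return 0;
}

With the j comparison, j sticks at elfcorehdr_index once two regions have been hashed, so the initrd and every later segment would also be dropped from the digest, while other segment orderings can leave the elfcorehdr itself included - exactly the over- and under-exclusion the commit message describes.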
Link: https://lkml.kernel.org/r/20240805150750.170739-1-petr.tesarik@suse.com Fixes: f7cc804a9fd4 ("kexec: exclude elfcorehdr from the segment digest") Signed-off-by: Petr Tesarik Acked-by: Baoquan He Cc: Eric Biederman Cc: Hari Bathini Cc: Sourabh Jain Cc: Eric DeVolder Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/kexec_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 3d64290d24c9..3eedb8c226ad 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -752,7 +752,7 @@ static int kexec_calculate_store_digests(struct kimage *image) #ifdef CONFIG_CRASH_HOTPLUG /* Exclude elfcorehdr segment to allow future changes via hotplug */ - if (j == image->elfcorehdr_index) + if (i == image->elfcorehdr_index) continue; #endif From 6cf74e0e5e3ab5d5c9defb4c73dad54d52224671 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 12 Aug 2024 18:16:06 +0100 Subject: [PATCH 056/374] mm: vmalloc: ensure vmap_block is initialised before adding to queue commit 3e3de7947c751509027d26b679ecd243bc9db255 upstream. Commit 8c61291fd850 ("mm: fix incorrect vbq reference in purge_fragmented_block") extended the 'vmap_block' structure to contain a 'cpu' field which is set at allocation time to the id of the initialising CPU. When a new 'vmap_block' is being instantiated by new_vmap_block(), the partially initialised structure is added to the local 'vmap_block_queue' xarray before the 'cpu' field has been initialised. If another CPU is concurrently walking the xarray (e.g. via vm_unmap_aliases()), then it may perform an out-of-bounds access to the remote queue thanks to an uninitialised index. This has been observed as UBSAN errors in Android: | Internal error: UBSAN: array index out of bounds: 00000000f2005512 [#1] PREEMPT SMP | | Call trace: | purge_fragmented_block+0x204/0x21c | _vm_unmap_aliases+0x170/0x378 | vm_unmap_aliases+0x1c/0x28 | change_memory_common+0x1dc/0x26c | set_memory_ro+0x18/0x24 | module_enable_ro+0x98/0x238 | do_init_module+0x1b0/0x310 Move the initialisation of 'vb->cpu' in new_vmap_block() ahead of the addition to the xarray. Link: https://lkml.kernel.org/r/20240812171606.17486-1-will@kernel.org Fixes: 8c61291fd850 ("mm: fix incorrect vbq reference in purge_fragmented_block") Signed-off-by: Will Deacon Reviewed-by: Baoquan He Reviewed-by: Uladzislau Rezki (Sony) Cc: Zhaoyang Huang Cc: Hailong.Liu Cc: Christoph Hellwig Cc: Lorenzo Stoakes Cc: Thomas Gleixner Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/vmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9816ee942c7e..bc6be460457d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2628,6 +2628,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) vb->dirty_max = 0; bitmap_set(vb->used_map, 0, (1UL << order)); INIT_LIST_HEAD(&vb->free_list); + vb->cpu = raw_smp_processor_id(); xa = addr_to_vb_xa(va->va_start); vb_idx = addr_to_vb_idx(va->va_start); @@ -2644,7 +2645,6 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) * integrity together with list_for_each_rcu from read * side. 
*/ - vb->cpu = raw_smp_processor_id(); vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); From 2d476c86ba4745fcbc912ce4627df4fa80caa9ad Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Fri, 16 Aug 2024 09:33:36 +0800 Subject: [PATCH 057/374] mm/slub: add check for s->flags in the alloc_tagging_slab_free_hook commit ab7ca09520e9c41c219a4427fe0dae24024bfe7f upstream. When enable CONFIG_MEMCG & CONFIG_KFENCE & CONFIG_KMEMLEAK, the following warning always occurs,This is because the following call stack occurred: mem_pool_alloc kmem_cache_alloc_noprof slab_alloc_node kfence_alloc Once the kfence allocation is successful,slab->obj_exts will not be empty, because it has already been assigned a value in kfence_init_pool. Since in the prepare_slab_obj_exts_hook function,we perform a check for s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE),the alloc_tag_add function will not be called as a result.Therefore,ref->ct remains NULL. However,when we call mem_pool_free,since obj_ext is not empty, it eventually leads to the alloc_tag_sub scenario being invoked. This is where the warning occurs. So we should add corresponding checks in the alloc_tagging_slab_free_hook. For __GFP_NO_OBJ_EXT case,I didn't see the specific case where it's using kfence,so I won't add the corresponding check in alloc_tagging_slab_free_hook for now. [ 3.734349] ------------[ cut here ]------------ [ 3.734807] alloc_tag was not set [ 3.735129] WARNING: CPU: 4 PID: 40 at ./include/linux/alloc_tag.h:130 kmem_cache_free+0x444/0x574 [ 3.735866] Modules linked in: autofs4 [ 3.736211] CPU: 4 UID: 0 PID: 40 Comm: ksoftirqd/4 Tainted: G W 6.11.0-rc3-dirty #1 [ 3.736969] Tainted: [W]=WARN [ 3.737258] Hardware name: QEMU KVM Virtual Machine, BIOS unknown 2/2/2022 [ 3.737875] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 3.738501] pc : kmem_cache_free+0x444/0x574 [ 3.738951] lr : kmem_cache_free+0x444/0x574 [ 3.739361] sp : ffff80008357bb60 [ 3.739693] x29: ffff80008357bb70 x28: 0000000000000000 x27: 0000000000000000 [ 3.740338] x26: ffff80008207f000 x25: ffff000b2eb2fd60 x24: ffff0000c0005700 [ 3.740982] x23: ffff8000804229e4 x22: ffff800082080000 x21: ffff800081756000 [ 3.741630] x20: fffffd7ff8253360 x19: 00000000000000a8 x18: ffffffffffffffff [ 3.742274] x17: ffff800ab327f000 x16: ffff800083398000 x15: ffff800081756df0 [ 3.742919] x14: 0000000000000000 x13: 205d344320202020 x12: 5b5d373038343337 [ 3.743560] x11: ffff80008357b650 x10: 000000000000005d x9 : 00000000ffffffd0 [ 3.744231] x8 : 7f7f7f7f7f7f7f7f x7 : ffff80008237bad0 x6 : c0000000ffff7fff [ 3.744907] x5 : ffff80008237ba78 x4 : ffff8000820bbad0 x3 : 0000000000000001 [ 3.745580] x2 : 68d66547c09f7800 x1 : 68d66547c09f7800 x0 : 0000000000000000 [ 3.746255] Call trace: [ 3.746530] kmem_cache_free+0x444/0x574 [ 3.746931] mem_pool_free+0x44/0xf4 [ 3.747306] free_object_rcu+0xc8/0xdc [ 3.747693] rcu_do_batch+0x234/0x8a4 [ 3.748075] rcu_core+0x230/0x3e4 [ 3.748424] rcu_core_si+0x14/0x1c [ 3.748780] handle_softirqs+0x134/0x378 [ 3.749189] run_ksoftirqd+0x70/0x9c [ 3.749560] smpboot_thread_fn+0x148/0x22c [ 3.749978] kthread+0x10c/0x118 [ 3.750323] ret_from_fork+0x10/0x20 [ 3.750696] ---[ end trace 0000000000000000 ]--- Link: https://lkml.kernel.org/r/20240816013336.17505-1-hao.ge@linux.dev Fixes: 4b8736964640 ("mm/slab: add allocation accounting into slab allocation and free paths") Signed-off-by: Hao Ge Cc: Christoph Lameter Cc: David Rientjes Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Joonsoo Kim Cc: 
Kees Cook Cc: Kent Overstreet Cc: Pekka Enberg Cc: Roman Gushchin Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/slub.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/slub.c b/mm/slub.c index 849e8972e2ff..be0ef60984ac 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2044,6 +2044,10 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, if (!mem_alloc_profiling_enabled()) return; + /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ + if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) + return; + obj_exts = slab_obj_exts(slab); if (!obj_exts) return; From e7eb8b9c8391d3e15aa7334ec179009fd74ed73f Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Wed, 21 Aug 2024 20:26:07 +0100 Subject: [PATCH 058/374] Revert "mm: skip CMA pages when they are not available" commit bfe0857c20c663fcc1592fa4e3a61ca12b07dac9 upstream. This reverts commit 5da226dbfce3 ("mm: skip CMA pages when they are not available") and b7108d66318a ("Multi-gen LRU: skip CMA pages when they are not eligible"). lruvec->lru_lock is highly contended and is held when calling isolate_lru_folios. If the lru has a large number of CMA folios consecutively, while the allocation type requested is not MIGRATE_MOVABLE, isolate_lru_folios can hold the lock for a very long time while it skips those. For FIO workload, ~150million order=0 folios were skipped to isolate a few ZONE_DMA folios [1]. This can cause lockups [1] and high memory pressure for extended periods of time [2]. Remove skipping CMA for MGLRU as well, as it was introduced in sort_folio for the same resaon as 5da226dbfce3a2f44978c2c7cf88166e69a6788b. [1] https://lore.kernel.org/all/CAOUHufbkhMZYz20aM_3rHZ3OcK4m2puji2FGpUpn_-DevGk3Kg@mail.gmail.com/ [2] https://lore.kernel.org/all/ZrssOrcJIDy8hacI@gmail.com/ [usamaarif642@gmail.com: also revert b7108d66318a, per Johannes] Link: https://lkml.kernel.org/r/9060a32d-b2d7-48c0-8626-1db535653c54@gmail.com Link: https://lkml.kernel.org/r/357ac325-4c61-497a-92a3-bdbd230d5ec9@gmail.com Link: https://lkml.kernel.org/r/9060a32d-b2d7-48c0-8626-1db535653c54@gmail.com Fixes: 5da226dbfce3 ("mm: skip CMA pages when they are not available") Signed-off-by: Usama Arif Acked-by: Johannes Weiner Cc: Bharata B Rao Cc: Breno Leitao Cc: David Hildenbrand Cc: Matthew Wilcox Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yu Zhao Cc: Zhaoyang Huang Cc: Zhaoyang Huang Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/vmscan.c | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 68ac33bea3a3..99a0c152a85e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1587,25 +1587,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, } -#ifdef CONFIG_CMA -/* - * It is waste of effort to scan and reclaim CMA pages if it is not available - * for current allocation context. Kswapd can not be enrolled as it can not - * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL - */ -static bool skip_cma(struct folio *folio, struct scan_control *sc) -{ - return !current_is_kswapd() && - gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && - folio_migratetype(folio) == MIGRATE_CMA; -} -#else -static bool skip_cma(struct folio *folio, struct scan_control *sc) -{ - return false; -} -#endif - /* * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. 
* @@ -1652,8 +1633,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, nr_pages = folio_nr_pages(folio); total_scan += nr_pages; - if (folio_zonenum(folio) > sc->reclaim_idx || - skip_cma(folio, sc)) { + if (folio_zonenum(folio) > sc->reclaim_idx) { nr_skipped[folio_zonenum(folio)] += nr_pages; move_to = &folios_skipped; goto move; @@ -4314,7 +4294,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* ineligible */ - if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { + if (zone > sc->reclaim_idx) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; From 0efbad8445fbba7896402500a1473450a299a08a Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 27 Aug 2024 10:11:16 -0700 Subject: [PATCH 059/374] spi: rockchip: Resolve unbalanced runtime PM / system PM handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit be721b451affbecc4ba4eaac3b71cdbdcade1b1b upstream. Commit e882575efc77 ("spi: rockchip: Suspend and resume the bus during NOIRQ_SYSTEM_SLEEP_PM ops") stopped respecting runtime PM status and simply disabled clocks unconditionally when suspending the system. This causes problems when the device is already runtime suspended when we go to sleep -- in which case we double-disable clocks and produce a WARNing. Switch back to pm_runtime_force_{suspend,resume}(), because that still seems like the right thing to do, and the aforementioned commit makes no explanation why it stopped using it. Also, refactor some of the resume() error handling, because it's not actually a good idea to re-disable clocks on failure. Fixes: e882575efc77 ("spi: rockchip: Suspend and resume the bus during NOIRQ_SYSTEM_SLEEP_PM ops") Cc: stable@vger.kernel.org Reported-by: Ondřej Jirman Closes: https://lore.kernel.org/lkml/20220621154218.sau54jeij4bunf56@core/ Signed-off-by: Brian Norris Link: https://patch.msgid.link/20240827171126.1115748-1-briannorris@chromium.org Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-rockchip.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index e1ecd96c7858..0bb33c43b1b4 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -945,14 +945,16 @@ static int rockchip_spi_suspend(struct device *dev) { int ret; struct spi_controller *ctlr = dev_get_drvdata(dev); - struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); ret = spi_controller_suspend(ctlr); if (ret < 0) return ret; - clk_disable_unprepare(rs->spiclk); - clk_disable_unprepare(rs->apb_pclk); + ret = pm_runtime_force_suspend(dev); + if (ret < 0) { + spi_controller_resume(ctlr); + return ret; + } pinctrl_pm_select_sleep_state(dev); @@ -963,25 +965,14 @@ static int rockchip_spi_resume(struct device *dev) { int ret; struct spi_controller *ctlr = dev_get_drvdata(dev); - struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); pinctrl_pm_select_default_state(dev); - ret = clk_prepare_enable(rs->apb_pclk); + ret = pm_runtime_force_resume(dev); if (ret < 0) return ret; - ret = clk_prepare_enable(rs->spiclk); - if (ret < 0) - clk_disable_unprepare(rs->apb_pclk); - - ret = spi_controller_resume(ctlr); - if (ret < 0) { - clk_disable_unprepare(rs->spiclk); - clk_disable_unprepare(rs->apb_pclk); - } - - return 0; + return spi_controller_resume(ctlr); } #endif /* CONFIG_PM_SLEEP */ From 
27282d2505b402f39371fd60d19d95c01a4b6776 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 4 Sep 2024 10:34:28 -0400 Subject: [PATCH 060/374] tracing/osnoise: Use a cpumask to know what threads are kthreads commit 177e1cc2f41235c145041eed03ef5bab18f32328 upstream. The start_kthread() and stop_thread() code was not always called with the interface_lock held. This means that the kthread variable could be unexpectedly changed causing the kthread_stop() to be called on it when it should not have been, leading to: while true; do rtla timerlat top -u -q & PID=$!; sleep 5; kill -INT $PID; sleep 0.001; kill -TERM $PID; wait $PID; done Causing the following OOPS: Oops: general protection fault, probably for non-canonical address 0xdffffc0000000002: 0000 [#1] PREEMPT SMP KASAN PTI KASAN: null-ptr-deref in range [0x0000000000000010-0x0000000000000017] CPU: 5 UID: 0 PID: 885 Comm: timerlatu/5 Not tainted 6.11.0-rc4-test-00002-gbc754cc76d1b-dirty #125 a533010b71dab205ad2f507188ce8c82203b0254 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 RIP: 0010:hrtimer_active+0x58/0x300 Code: 48 c1 ee 03 41 54 48 01 d1 48 01 d6 55 53 48 83 ec 20 80 39 00 0f 85 30 02 00 00 49 8b 6f 30 4c 8d 75 10 4c 89 f0 48 c1 e8 03 <0f> b6 3c 10 4c 89 f0 83 e0 07 83 c0 03 40 38 f8 7c 09 40 84 ff 0f RSP: 0018:ffff88811d97f940 EFLAGS: 00010202 RAX: 0000000000000002 RBX: ffff88823c6b5b28 RCX: ffffed10478d6b6b RDX: dffffc0000000000 RSI: ffffed10478d6b6c RDI: ffff88823c6b5b28 RBP: 0000000000000000 R08: ffff88823c6b5b58 R09: ffff88823c6b5b60 R10: ffff88811d97f957 R11: 0000000000000010 R12: 00000000000a801d R13: ffff88810d8b35d8 R14: 0000000000000010 R15: ffff88823c6b5b28 FS: 0000000000000000(0000) GS:ffff88823c680000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000561858ad7258 CR3: 000000007729e001 CR4: 0000000000170ef0 Call Trace: ? die_addr+0x40/0xa0 ? exc_general_protection+0x154/0x230 ? asm_exc_general_protection+0x26/0x30 ? hrtimer_active+0x58/0x300 ? __pfx_mutex_lock+0x10/0x10 ? __pfx_locks_remove_file+0x10/0x10 hrtimer_cancel+0x15/0x40 timerlat_fd_release+0x8e/0x1f0 ? security_file_release+0x43/0x80 __fput+0x372/0xb10 task_work_run+0x11e/0x1f0 ? _raw_spin_lock+0x85/0xe0 ? __pfx_task_work_run+0x10/0x10 ? poison_slab_object+0x109/0x170 ? do_exit+0x7a0/0x24b0 do_exit+0x7bd/0x24b0 ? __pfx_migrate_enable+0x10/0x10 ? __pfx_do_exit+0x10/0x10 ? __pfx_read_tsc+0x10/0x10 ? ktime_get+0x64/0x140 ? _raw_spin_lock_irq+0x86/0xe0 do_group_exit+0xb0/0x220 get_signal+0x17ba/0x1b50 ? vfs_read+0x179/0xa40 ? timerlat_fd_read+0x30b/0x9d0 ? __pfx_get_signal+0x10/0x10 ? __pfx_timerlat_fd_read+0x10/0x10 arch_do_signal_or_restart+0x8c/0x570 ? __pfx_arch_do_signal_or_restart+0x10/0x10 ? vfs_read+0x179/0xa40 ? ksys_read+0xfe/0x1d0 ? __pfx_ksys_read+0x10/0x10 syscall_exit_to_user_mode+0xbc/0x130 do_syscall_64+0x74/0x110 ? __pfx___rseq_handle_notify_resume+0x10/0x10 ? __pfx_ksys_read+0x10/0x10 ? fpregs_restore_userregs+0xdb/0x1e0 ? fpregs_restore_userregs+0xdb/0x1e0 ? syscall_exit_to_user_mode+0x116/0x130 ? do_syscall_64+0x74/0x110 ? do_syscall_64+0x74/0x110 ? do_syscall_64+0x74/0x110 entry_SYSCALL_64_after_hwframe+0x71/0x79 RIP: 0033:0x7ff0070eca9c Code: Unable to access opcode bytes at 0x7ff0070eca72. 
RSP: 002b:00007ff006dff8c0 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 RAX: 0000000000000000 RBX: 0000000000000005 RCX: 00007ff0070eca9c RDX: 0000000000000400 RSI: 00007ff006dff9a0 RDI: 0000000000000003 RBP: 00007ff006dffde0 R08: 0000000000000000 R09: 00007ff000000ba0 R10: 00007ff007004b08 R11: 0000000000000246 R12: 0000000000000003 R13: 00007ff006dff9a0 R14: 0000000000000007 R15: 0000000000000008 Modules linked in: snd_hda_intel snd_intel_dspcfg snd_intel_sdw_acpi snd_hda_codec snd_hwdep snd_hda_core ---[ end trace 0000000000000000 ]--- This is because it would mistakenly call kthread_stop() on a user space thread making it "exit" before it actually exits. Since kthreads are created based on global behavior, use a cpumask to know when kthreads are running and that they need to be shutdown before proceeding to do new work. Link: https://lore.kernel.org/all/20240820130001.124768-1-tglozar@redhat.com/ This was debugged by using the persistent ring buffer: Link: https://lore.kernel.org/all/20240823013902.135036960@goodmis.org/ Note, locking was originally used to fix this, but that proved to cause too many deadlocks to work around: https://lore.kernel.org/linux-trace-kernel/20240823102816.5e55753b@gandalf.local.home/ Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: "Luis Claudio R. Goncalves" Link: https://lore.kernel.org/20240904103428.08efdf4c@gandalf.local.home Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") Reported-by: Tomas Glozar Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_osnoise.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index a8e28f9b9271..49f10c7f7fd0 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1612,6 +1612,7 @@ static int run_osnoise(void) static struct cpumask osnoise_cpumask; static struct cpumask save_cpumask; +static struct cpumask kthread_cpumask; /* * osnoise_sleep - sleep until the next period @@ -1675,6 +1676,7 @@ static inline int osnoise_migration_pending(void) */ mutex_lock(&interface_lock); this_cpu_osn_var()->kthread = NULL; + cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask); mutex_unlock(&interface_lock); return 1; @@ -1947,9 +1949,10 @@ static void stop_kthread(unsigned int cpu) kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; if (kthread) { - if (test_bit(OSN_WORKLOAD, &osnoise_options)) { + if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) && + !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) { kthread_stop(kthread); - } else { + } else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) { /* * This is a user thread waiting on the timerlat_fd. 
We need * to close all users, and the best way to guarantee this is @@ -2021,6 +2024,7 @@ static int start_kthread(unsigned int cpu) } per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread; + cpumask_set_cpu(cpu, &kthread_cpumask); return 0; } @@ -2048,8 +2052,16 @@ static int start_per_cpu_kthreads(void) */ cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask); - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { + if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) { + struct task_struct *kthread; + + kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; + if (!WARN_ON(!kthread)) + kthread_stop(kthread); + } per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; + } for_each_cpu(cpu, current_mask) { retval = start_kthread(cpu); From 8a9d0d405159e9c796ddf771f7cff691c1a2bc1e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 Sep 2024 08:53:30 -0400 Subject: [PATCH 061/374] tracing/timerlat: Only clear timer if a kthread exists commit e6a53481da292d970d1edf0d8831121d1c5e2f0d upstream. The timerlat tracer can use user space threads to check for osnoise and timer latency. If the program using this is killed via a SIGTERM, the threads are shutdown one at a time and another tracing instance can start up resetting the threads before they are fully closed. That causes the hrtimer assigned to the kthread to be shutdown and freed twice when the dying thread finally closes the file descriptors, causing a use-after-free bug. Only cancel the hrtimer if the associated thread is still around. Also add the interface_lock around the resetting of the tlat_var->kthread. Note, this is just a quick fix that can be backported to stable. A real fix is to have a better synchronization between the shutdown of old threads and the starting of new ones. Link: https://lore.kernel.org/all/20240820130001.124768-1-tglozar@redhat.com/ Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: "Luis Claudio R. Goncalves" Link: https://lore.kernel.org/20240905085330.45985730@gandalf.local.home Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") Reported-by: Tomas Glozar Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_osnoise.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 49f10c7f7fd0..32993e380c2f 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -252,6 +252,11 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void) return this_cpu_ptr(&per_cpu_timerlat_var); } +/* + * Protect the interface. + */ +static struct mutex interface_lock; + /* * tlat_var_reset - Reset the values of the given timerlat_variables */ @@ -259,14 +264,20 @@ static inline void tlat_var_reset(void) { struct timerlat_variables *tlat_var; int cpu; + + /* Synchronize with the timerlat interfaces */ + mutex_lock(&interface_lock); /* * So far, all the values are initialized as 0, so * zeroing the structure is perfect. */ for_each_cpu(cpu, cpu_online_mask) { tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); + if (tlat_var->kthread) + hrtimer_cancel(&tlat_var->timer); memset(tlat_var, 0, sizeof(*tlat_var)); } + mutex_unlock(&interface_lock); } #else /* CONFIG_TIMERLAT_TRACER */ #define tlat_var_reset() do {} while (0) @@ -331,11 +342,6 @@ struct timerlat_sample { }; #endif -/* - * Protect the interface. - */ -static struct mutex interface_lock; - /* * Tracer data. 
*/ @@ -2591,7 +2597,8 @@ static int timerlat_fd_release(struct inode *inode, struct file *file) osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu); tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); - hrtimer_cancel(&tlat_var->timer); + if (tlat_var->kthread) + hrtimer_cancel(&tlat_var->timer); memset(tlat_var, 0, sizeof(*tlat_var)); osn_var->sampling = 0; From aec14e97c866f3c32e613933da7bdeadba76c6ff Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Tue, 27 Aug 2024 20:46:54 +0800 Subject: [PATCH 062/374] tracing: Avoid possible softlockup in tracing_iter_reset() commit 49aa8a1f4d6800721c7971ed383078257f12e8f9 upstream. In __tracing_open(), when max latency tracers took place on the cpu, the time start of its buffer would be updated, then event entries with timestamps being earlier than start of the buffer would be skipped (see tracing_iter_reset()). Softlockup will occur if the kernel is non-preemptible and too many entries were skipped in the loop that reset every cpu buffer, so add cond_resched() to avoid it. Cc: stable@vger.kernel.org Fixes: 2f26ebd549b9a ("tracing: use timestamp to determine start of latency traces") Link: https://lore.kernel.org/20240827124654.3817443-1-zhengyejian@huaweicloud.com Suggested-by: Steven Rostedt Signed-off-by: Zheng Yejian Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index cb507860163d..c566ad068b40 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3958,6 +3958,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) break; entries++; ring_buffer_iter_advance(buf_iter); + /* This could be a big loop */ + cond_resched(); } per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; From 4679272d5252720746fd9c5465352cbc5665f230 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 Sep 2024 11:33:59 -0400 Subject: [PATCH 063/374] tracing/timerlat: Add interface_lock around clearing of kthread in stop_kthread() commit 5bfbcd1ee57b607fd29e4645c7f350dd385dd9ad upstream. The timerlat interface will get and put the task that is part of the "kthread" field of the osn_var to keep it around until all references are released. But here's a race in the "stop_kthread()" code that will call put_task_struct() on the kthread if it is not a kernel thread. This can race with the releasing of the references to that task struct and the put_task_struct() can be called twice when it should have been called just once. Take the interface_lock() in stop_kthread() to synchronize this change. But to do so, the function stop_per_cpu_kthreads() needs to change the loop from for_each_online_cpu() to for_each_possible_cpu() and remove the cpu_read_lock(), as the interface_lock can not be taken while the cpu locks are held. The only side effect of this change is that it may do some extra work, as the per_cpu variables of the offline CPUs would not be set anyway, and would simply be skipped in the loop. Remove unneeded "return;" in stop_kthread(). Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Tomas Glozar Cc: John Kacur Cc: "Luis Claudio R. 
Goncalves" Link: https://lore.kernel.org/20240905113359.2b934242@gandalf.local.home Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_osnoise.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 32993e380c2f..5b06f67879f5 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1953,8 +1953,12 @@ static void stop_kthread(unsigned int cpu) { struct task_struct *kthread; + mutex_lock(&interface_lock); kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; if (kthread) { + per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; + mutex_unlock(&interface_lock); + if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) && !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) { kthread_stop(kthread); @@ -1967,8 +1971,8 @@ static void stop_kthread(unsigned int cpu) kill_pid(kthread->thread_pid, SIGKILL, 1); put_task_struct(kthread); } - per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; } else { + mutex_unlock(&interface_lock); /* if no workload, just return */ if (!test_bit(OSN_WORKLOAD, &osnoise_options)) { /* @@ -1976,7 +1980,6 @@ static void stop_kthread(unsigned int cpu) */ per_cpu(per_cpu_osnoise_var, cpu).sampling = false; barrier(); - return; } } } @@ -1991,12 +1994,8 @@ static void stop_per_cpu_kthreads(void) { int cpu; - cpus_read_lock(); - - for_each_online_cpu(cpu) + for_each_possible_cpu(cpu) stop_kthread(cpu); - - cpus_read_unlock(); } /* From db978287e908d48b209e374b00d847b2d785e0a9 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 13 Aug 2024 22:25:22 +0200 Subject: [PATCH 064/374] userfaultfd: don't BUG_ON() if khugepaged yanks our page table commit 4828d207dc5161dc7ddf9a4f6dcfd80c7dd7d20a upstream. Since khugepaged was changed to allow retracting page tables in file mappings without holding the mmap lock, these BUG_ON()s are wrong - get rid of them. We could also remove the preceding "if (unlikely(...))" block, but then we could reach pte_offset_map_lock() with transhuge pages not just for file mappings but also for anonymous mappings - which would probably be fine but I think is not necessarily expected. Link: https://lkml.kernel.org/r/20240813-uffd-thp-flip-fix-v2-2-5efa61078a41@google.com Fixes: 1d65b771bc08 ("mm/khugepaged: retract_page_tables() without mmap or vma lock") Signed-off-by: Jann Horn Reviewed-by: Qi Zheng Acked-by: David Hildenbrand Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Pavel Emelyanov Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/userfaultfd.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index defa5109cc62..335de81ae5d4 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -805,9 +805,10 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx, err = -EFAULT; break; } - - BUG_ON(pmd_none(*dst_pmd)); - BUG_ON(pmd_trans_huge(*dst_pmd)); + /* + * For shmem mappings, khugepaged is allowed to remove page + * tables under us; pte_offset_map_lock() will deal with that. + */ err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, src_addr, flags, &folio); From 98cc18b1b71e23fe81a5194ed432b20c2d81a01a Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 13 Aug 2024 22:25:21 +0200 Subject: [PATCH 065/374] userfaultfd: fix checks for huge PMDs commit 71c186efc1b2cf1aeabfeff3b9bd5ac4c5ac14d8 upstream. 
Patch series "userfaultfd: fix races around pmd_trans_huge() check", v2. The pmd_trans_huge() code in mfill_atomic() is wrong in three different ways depending on kernel version: 1. The pmd_trans_huge() check is racy and can lead to a BUG_ON() (if you hit the right two race windows) - I've tested this in a kernel build with some extra mdelay() calls. See the commit message for a description of the race scenario. On older kernels (before 6.5), I think the same bug can even theoretically lead to accessing transhuge page contents as a page table if you hit the right 5 narrow race windows (I haven't tested this case). 2. As pointed out by Qi Zheng, pmd_trans_huge() is not sufficient for detecting PMDs that don't point to page tables. On older kernels (before 6.5), you'd just have to win a single fairly wide race to hit this. I've tested this on 6.1 stable by racing migration (with a mdelay() patched into try_to_migrate()) against UFFDIO_ZEROPAGE - on my x86 VM, that causes a kernel oops in ptlock_ptr(). 3. On newer kernels (>=6.5), for shmem mappings, khugepaged is allowed to yank page tables out from under us (though I haven't tested that), so I think the BUG_ON() checks in mfill_atomic() are just wrong. I decided to write two separate fixes for these (one fix for bugs 1+2, one fix for bug 3), so that the first fix can be backported to kernels affected by bugs 1+2. This patch (of 2): This fixes two issues. I discovered that the following race can occur: mfill_atomic other thread ============ ============ pmdp_get_lockless() [reads none pmd] __pte_alloc [no-op] BUG_ON(pmd_none(*dst_pmd)) I have experimentally verified this in a kernel with extra mdelay() calls; the BUG_ON(pmd_none(*dst_pmd)) triggers. On kernels newer than commit 0d940a9b270b ("mm/pgtable: allow pte_offset_map[_lock]() to fail"), this can't lead to anything worse than a BUG_ON(), since the page table access helpers are actually designed to deal with page tables concurrently disappearing; but on older kernels (<=6.4), I think we could probably theoretically race past the two BUG_ON() checks and end up treating a hugepage as a page table. The second issue is that, as Qi Zheng pointed out, there are other types of huge PMDs that pmd_trans_huge() can't catch: devmap PMDs and swap PMDs (in particular, migration PMDs). On <=6.4, this is worse than the first issue: If mfill_atomic() runs on a PMD that contains a migration entry (which just requires winning a single, fairly wide race), it will pass the PMD to pte_offset_map_lock(), which assumes that the PMD points to a page table. Breakage follows: First, the kernel tries to take the PTE lock (which will crash or maybe worse if there is no "struct page" for the address bits in the migration entry PMD - I think at least on X86 there usually is no corresponding "struct page" thanks to the PTE inversion mitigation, amd64 looks different). If that didn't crash, the kernel would next try to write a PTE into what it wrongly thinks is a page table. As part of fixing these issues, get rid of the check for pmd_trans_huge() before __pte_alloc() - that's redundant, we're going to have to check for that after the __pte_alloc() anyway. Backport note: pmdp_get_lockless() is pmd_read_atomic() in older kernels. 
Link: https://lkml.kernel.org/r/20240813-uffd-thp-flip-fix-v2-0-5efa61078a41@google.com Link: https://lkml.kernel.org/r/20240813-uffd-thp-flip-fix-v2-1-5efa61078a41@google.com Fixes: c1a4de99fada ("userfaultfd: mcopy_atomic|mfill_zeropage: UFFDIO_COPY|UFFDIO_ZEROPAGE preparation") Signed-off-by: Jann Horn Acked-by: David Hildenbrand Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Jann Horn Cc: Pavel Emelyanov Cc: Qi Zheng Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/userfaultfd.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 335de81ae5d4..75f47706882f 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -787,21 +787,23 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx, } dst_pmdval = pmdp_get_lockless(dst_pmd); - /* - * If the dst_pmd is mapped as THP don't - * override it and just be strict. - */ - if (unlikely(pmd_trans_huge(dst_pmdval))) { - err = -EEXIST; - break; - } if (unlikely(pmd_none(dst_pmdval)) && unlikely(__pte_alloc(dst_mm, dst_pmd))) { err = -ENOMEM; break; } - /* If an huge pmd materialized from under us fail */ - if (unlikely(pmd_trans_huge(*dst_pmd))) { + dst_pmdval = pmdp_get_lockless(dst_pmd); + /* + * If the dst_pmd is THP don't override it and just be strict. + * (This includes the case where the PMD used to be THP and + * changed back to none after __pte_alloc().) + */ + if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) || + pmd_devmap(dst_pmdval))) { + err = -EEXIST; + break; + } + if (unlikely(pmd_bad(dst_pmdval))) { err = -EFAULT; break; } From 0a11262549ac2ac6fb98c7cd40a67136817e5a52 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Mon, 26 Aug 2024 19:20:56 +0800 Subject: [PATCH 066/374] fscache: delete fscache_cookie_lru_timer when fscache exits to avoid UAF commit 72a6e22c604c95ddb3b10b5d3bb85b6ff4dbc34f upstream. The fscache_cookie_lru_timer is initialized when the fscache module is inserted, but is not deleted when the fscache module is removed. If timer_reduce() is called before removing the fscache module, the fscache_cookie_lru_timer will be added to the timer list of the current cpu. Afterwards, a use-after-free will be triggered in the softIRQ after removing the fscache module, as follows: ================================================================== BUG: unable to handle page fault for address: fffffbfff803c9e9 PF: supervisor read access in kernel mode PF: error_code(0x0000) - not-present page PGD 21ffea067 P4D 21ffea067 PUD 21ffe6067 PMD 110a7c067 PTE 0 Oops: Oops: 0000 [#1] PREEMPT SMP KASAN PTI CPU: 1 UID: 0 PID: 0 Comm: swapper/1 Tainted: G W 6.11.0-rc3 #855 Tainted: [W]=WARN RIP: 0010:__run_timer_base.part.0+0x254/0x8a0 Call Trace: tmigr_handle_remote_up+0x627/0x810 __walk_groups.isra.0+0x47/0x140 tmigr_handle_remote+0x1fa/0x2f0 handle_softirqs+0x180/0x590 irq_exit_rcu+0x84/0xb0 sysvec_apic_timer_interrupt+0x6e/0x90 asm_sysvec_apic_timer_interrupt+0x1a/0x20 RIP: 0010:default_idle+0xf/0x20 default_idle_call+0x38/0x60 do_idle+0x2b5/0x300 cpu_startup_entry+0x54/0x60 start_secondary+0x20d/0x280 common_startup_64+0x13e/0x148 Modules linked in: [last unloaded: netfs] ================================================================== Therefore delete fscache_cookie_lru_timer when removing the fscahe module. 
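The resulting change is a single call, but the ordering is the point: the timer has to be taken off the per-CPU timer wheel, and any running callback waited for, before the workqueue is destroyed and the module text disappears. Roughly, as fscache_exit() looks after the patch below:

    void __exit fscache_exit(void)
    {
            /* ... cache, cookie jar and procfs teardown ... */
            timer_shutdown_sync(&fscache_cookie_lru_timer);
            destroy_workqueue(fscache_wq);
            pr_notice("FS-Cache unloaded\n");
    }

timer_shutdown_sync() deactivates the timer, waits for a callback that may already be running, and turns any later attempt to re-arm it into a no-op, so nothing can be left pending on a CPU once the module is unloaded.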
Fixes: 12bb21a29c19 ("fscache: Implement cookie user counting and resource pinning") Cc: stable@kernel.org Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240826112056.2458299-1-libaokun@huaweicloud.com Acked-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Greg Kroah-Hartman --- fs/netfs/fscache_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/netfs/fscache_main.c b/fs/netfs/fscache_main.c index bf9b33d26e31..8e0aacbcfc8e 100644 --- a/fs/netfs/fscache_main.c +++ b/fs/netfs/fscache_main.c @@ -103,6 +103,7 @@ void __exit fscache_exit(void) kmem_cache_destroy(fscache_cookie_jar); fscache_proc_cleanup(); + timer_shutdown_sync(&fscache_cookie_lru_timer); destroy_workqueue(fscache_wq); pr_notice("FS-Cache unloaded\n"); } From f579d17a86448779f9642ad8baca6e3036a8e2d6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 4 Sep 2024 13:16:05 -0400 Subject: [PATCH 067/374] eventfs: Use list_del_rcu() for SRCU protected list variable commit d2603279c7d645bf0d11fa253b23f1ab48fc8d3c upstream. Chi Zhiling reported: We found a null pointer accessing in tracefs[1], the reason is that the variable 'ei_child' is set to LIST_POISON1, that means the list was removed in eventfs_remove_rec. so when access the ei_child->is_freed, the panic triggered. by the way, the following script can reproduce this panic loop1 (){ while true do echo "p:kp submit_bio" > /sys/kernel/debug/tracing/kprobe_events echo "" > /sys/kernel/debug/tracing/kprobe_events done } loop2 (){ while true do tree /sys/kernel/debug/tracing/events/kprobes/ done } loop1 & loop2 [1]: [ 1147.959632][T17331] Unable to handle kernel paging request at virtual address dead000000000150 [ 1147.968239][T17331] Mem abort info: [ 1147.971739][T17331] ESR = 0x0000000096000004 [ 1147.976172][T17331] EC = 0x25: DABT (current EL), IL = 32 bits [ 1147.982171][T17331] SET = 0, FnV = 0 [ 1147.985906][T17331] EA = 0, S1PTW = 0 [ 1147.989734][T17331] FSC = 0x04: level 0 translation fault [ 1147.995292][T17331] Data abort info: [ 1147.998858][T17331] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 [ 1148.005023][T17331] CM = 0, WnR = 0, TnD = 0, TagAccess = 0 [ 1148.010759][T17331] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 [ 1148.016752][T17331] [dead000000000150] address between user and kernel address ranges [ 1148.024571][T17331] Internal error: Oops: 0000000096000004 [#1] SMP [ 1148.030825][T17331] Modules linked in: team_mode_loadbalance team nlmon act_gact cls_flower sch_ingress bonding tls macvlan dummy ib_core bridge stp llc veth amdgpu amdxcp mfd_core gpu_sched drm_exec drm_buddy radeon crct10dif_ce video drm_suballoc_helper ghash_ce drm_ttm_helper sha2_ce ttm sha256_arm64 i2c_algo_bit sha1_ce sbsa_gwdt cp210x drm_display_helper cec sr_mod cdrom drm_kms_helper binfmt_misc sg loop fuse drm dm_mod nfnetlink ip_tables autofs4 [last unloaded: tls] [ 1148.072808][T17331] CPU: 3 PID: 17331 Comm: ls Tainted: G W ------- ---- 6.6.43 #2 [ 1148.081751][T17331] Source Version: 21b3b386e948bedd29369af66f3e98ab01b1c650 [ 1148.088783][T17331] Hardware name: Greatwall GW-001M1A-FTF/GW-001M1A-FTF, BIOS KunLun BIOS V4.0 07/16/2020 [ 1148.098419][T17331] pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 1148.106060][T17331] pc : eventfs_iterate+0x2c0/0x398 [ 1148.111017][T17331] lr : eventfs_iterate+0x2fc/0x398 [ 1148.115969][T17331] sp : ffff80008d56bbd0 [ 1148.119964][T17331] x29: ffff80008d56bbf0 x28: ffff001ff5be2600 x27: 0000000000000000 [ 1148.127781][T17331] x26: ffff001ff52ca4e0 x25: 0000000000009977 x24: 
dead000000000100 [ 1148.135598][T17331] x23: 0000000000000000 x22: 000000000000000b x21: ffff800082645f10 [ 1148.143415][T17331] x20: ffff001fddf87c70 x19: ffff80008d56bc90 x18: 0000000000000000 [ 1148.151231][T17331] x17: 0000000000000000 x16: 0000000000000000 x15: ffff001ff52ca4e0 [ 1148.159048][T17331] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 [ 1148.166864][T17331] x11: 0000000000000000 x10: 0000000000000000 x9 : ffff8000804391d0 [ 1148.174680][T17331] x8 : 0000000180000000 x7 : 0000000000000018 x6 : 0000aaab04b92862 [ 1148.182498][T17331] x5 : 0000aaab04b92862 x4 : 0000000080000000 x3 : 0000000000000068 [ 1148.190314][T17331] x2 : 000000000000000f x1 : 0000000000007ea8 x0 : 0000000000000001 [ 1148.198131][T17331] Call trace: [ 1148.201259][T17331] eventfs_iterate+0x2c0/0x398 [ 1148.205864][T17331] iterate_dir+0x98/0x188 [ 1148.210036][T17331] __arm64_sys_getdents64+0x78/0x160 [ 1148.215161][T17331] invoke_syscall+0x78/0x108 [ 1148.219593][T17331] el0_svc_common.constprop.0+0x48/0xf0 [ 1148.224977][T17331] do_el0_svc+0x24/0x38 [ 1148.228974][T17331] el0_svc+0x40/0x168 [ 1148.232798][T17331] el0t_64_sync_handler+0x120/0x130 [ 1148.237836][T17331] el0t_64_sync+0x1a4/0x1a8 [ 1148.242182][T17331] Code: 54ffff6c f9400676 910006d6 f9000676 (b9405300) [ 1148.248955][T17331] ---[ end trace 0000000000000000 ]--- The issue is that list_del() is used on an SRCU protected list variable before the synchronization occurs. This can poison the list pointers while there is a reader iterating the list. This is simply fixed by using list_del_rcu() that is specifically made for this purpose. Link: https://lore.kernel.org/linux-trace-kernel/20240829085025.3600021-1-chizhiling@163.com/ Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Link: https://lore.kernel.org/20240904131605.640d42b1@gandalf.local.home Fixes: 43aa6f97c2d03 ("eventfs: Get rid of dentry pointers without refcounts") Reported-by: Chi Zhiling Tested-by: Chi Zhiling Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- fs/tracefs/event_inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c index 01e99e98457d..8705c77a9e75 100644 --- a/fs/tracefs/event_inode.c +++ b/fs/tracefs/event_inode.c @@ -862,7 +862,7 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level) list_for_each_entry(ei_child, &ei->children, list) eventfs_remove_rec(ei_child, level + 1); - list_del(&ei->list); + list_del_rcu(&ei->list); free_ei(ei); } From 4982a47154f0b50de81ee0a0b169a3fc74120a65 Mon Sep 17 00:00:00 2001 From: Souradeep Chakrabarti Date: Mon, 2 Sep 2024 05:43:47 -0700 Subject: [PATCH 068/374] net: mana: Fix error handling in mana_create_txq/rxq's NAPI cleanup commit b6ecc662037694488bfff7c9fd21c405df8411f2 upstream. Currently napi_disable() gets called during rxq and txq cleanup, even before napi is enabled and hrtimer is initialized. It causes kernel panic. ? page_fault_oops+0x136/0x2b0 ? page_counter_cancel+0x2e/0x80 ? do_user_addr_fault+0x2f2/0x640 ? refill_obj_stock+0xc4/0x110 ? exc_page_fault+0x71/0x160 ? asm_exc_page_fault+0x27/0x30 ? __mmdrop+0x10/0x180 ? __mmdrop+0xec/0x180 ? hrtimer_active+0xd/0x50 hrtimer_try_to_cancel+0x2c/0xf0 hrtimer_cancel+0x15/0x30 napi_disable+0x65/0x90 mana_destroy_rxq+0x4c/0x2f0 mana_create_rxq.isra.0+0x56c/0x6d0 ? mana_uncfg_vport+0x50/0x50 mana_alloc_queues+0x21b/0x320 ? 
skb_dequeue+0x5f/0x80 Cc: stable@vger.kernel.org Fixes: e1b5683ff62e ("net: mana: Move NAPI from EQ to CQ") Signed-off-by: Souradeep Chakrabarti Reviewed-by: Haiyang Zhang Reviewed-by: Shradha Gupta Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/microsoft/mana/mana_en.c | 22 +++++++++++-------- include/net/mana/mana.h | 2 ++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 482b9cd36950..bb77327bfa81 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1857,10 +1857,12 @@ static void mana_destroy_txq(struct mana_port_context *apc) for (i = 0; i < apc->num_queues; i++) { napi = &apc->tx_qp[i].tx_cq.napi; - napi_synchronize(napi); - napi_disable(napi); - netif_napi_del(napi); - + if (apc->tx_qp[i].txq.napi_initialized) { + napi_synchronize(napi); + napi_disable(napi); + netif_napi_del(napi); + apc->tx_qp[i].txq.napi_initialized = false; + } mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); @@ -1916,6 +1918,7 @@ static int mana_create_txq(struct mana_port_context *apc, txq->ndev = net; txq->net_txq = netdev_get_tx_queue(net, i); txq->vp_offset = apc->tx_vp_offset; + txq->napi_initialized = false; skb_queue_head_init(&txq->pending_skbs); memset(&spec, 0, sizeof(spec)); @@ -1982,6 +1985,7 @@ static int mana_create_txq(struct mana_port_context *apc, netif_napi_add_tx(net, &cq->napi, mana_poll); napi_enable(&cq->napi); + txq->napi_initialized = true; mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); } @@ -1993,7 +1997,7 @@ static int mana_create_txq(struct mana_port_context *apc, } static void mana_destroy_rxq(struct mana_port_context *apc, - struct mana_rxq *rxq, bool validate_state) + struct mana_rxq *rxq, bool napi_initialized) { struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; @@ -2008,15 +2012,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc, napi = &rxq->rx_cq.napi; - if (validate_state) + if (napi_initialized) { napi_synchronize(napi); - napi_disable(napi); + napi_disable(napi); + netif_napi_del(napi); + } xdp_rxq_info_unreg(&rxq->xdp_rxq); - netif_napi_del(napi); - mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); mana_deinit_cq(apc, &rxq->rx_cq); diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 9bdb1fdc7c51..5927bd9d46be 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -97,6 +97,8 @@ struct mana_txq { atomic_t pending_sends; + bool napi_initialized; + struct mana_stats_tx stats; }; From ddee07e8ad906068c29932bae0a5fe045c71b8c7 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Thu, 29 Aug 2024 15:43:46 +0800 Subject: [PATCH 069/374] net: mctp-serial: Fix missing escapes on transmit commit f962e8361adfa84e8252d3fc3e5e6bb879f029b1 upstream. 0x7d and 0x7e bytes are meant to be escaped in the data portion of frames, but this didn't occur since next_chunk_len() had an off-by-one error. That also resulted in the final byte of a payload being written as a separate tty write op. The chunk prior to an escaped byte would be one byte short, and the next call would never test the txpos+1 case, which is where the escaped byte was located. That meant it never hit the escaping case in mctp_serial_tx_work(). 
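To make the boundary condition concrete, a small standalone sketch (plain C, not the driver code) of what the corrected scan computes - the number of bytes from the current position that can go out verbatim, stopping at, and not including, the first byte that needs escaping; the leading one-byte special case is assumed here for completeness:

    #include <stdbool.h>
    #include <stddef.h>

    /* Per the commit message, 0x7d and 0x7e must be escaped in frame data. */
    static bool needs_escape(unsigned char c)
    {
            return c == 0x7d || c == 0x7e;
    }

    /* Length of the next run of bytes that can be written unescaped. */
    static size_t chunk_len(const unsigned char *buf, size_t len, size_t pos)
    {
            size_t i;

            if (needs_escape(buf[pos]))
                    return 1;       /* escaped byte goes out on its own */

            for (i = 1; pos + i < len; i++)
                    if (needs_escape(buf[pos + i]))
                            break;

            return i;
    }

Working the example input below through by hand: the old scan (testing buf[pos + i + 1]) splits it as 01 00 08 / c8 7e 80 / 02, letting the 0x7e through unescaped, while the fixed scan splits it as 01 00 08 c8 / 7e / 80 02, so the 0x7e hits the escape path.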
Example Input: 01 00 08 c8 7e 80 02 Previous incorrect chunks from next_chunk_len(): 01 00 08 c8 7e 80 02 With this fix: 01 00 08 c8 7e 80 02 Cc: stable@vger.kernel.org Fixes: a0c2ccd9b5ad ("mctp: Add MCTP-over-serial transport binding") Signed-off-by: Matt Johnston Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/mctp/mctp-serial.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index 5bf6fdff701c..346e6ad36054 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -91,8 +91,8 @@ static int next_chunk_len(struct mctp_serial *dev) * will be those non-escaped bytes, and does not include the escaped * byte. */ - for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) { - if (needs_escape(dev->txbuf[dev->txpos + i + 1])) + for (i = 1; i + dev->txpos < dev->txlen; i++) { + if (needs_escape(dev->txbuf[dev->txpos + i])) break; } From ce9e83a066f4fae97e1f75a06e0f2b22f649d750 Mon Sep 17 00:00:00 2001 From: Mitchell Levy Date: Mon, 12 Aug 2024 13:44:12 -0700 Subject: [PATCH 070/374] x86/fpu: Avoid writing LBR bit to IA32_XSS unless supported commit 2848ff28d180bd63a95da8e5dcbcdd76c1beeb7b upstream. There are two distinct CPU features related to the use of XSAVES and LBR: whether LBR is itself supported and whether XSAVES supports LBR. The LBR subsystem correctly checks both in intel_pmu_arch_lbr_init(), but the XSTATE subsystem does not. The LBR bit is only removed from xfeatures_mask_independent when LBR is not supported by the CPU, but there is no validation of XSTATE support. If XSAVES does not support LBR the write to IA32_XSS causes a #GP fault, leaving the state of IA32_XSS unchanged, i.e. zero. The fault is handled with a warning and the boot continues. Consequently the next XRSTORS which tries to restore supervisor state fails with #GP because the RFBM has zero for all supervisor features, which does not match the XCOMP_BV field. As XFEATURE_MASK_FPSTATE includes supervisor features setting up the FPU causes a #GP, which ends up in fpu_reset_from_exception_fixup(). That fails due to the same problem resulting in recursive #GPs until the kernel runs out of stack space and double faults. Prevent this by storing the supported independent features in fpu_kernel_cfg during XSTATE initialization and use that cached value for retrieving the independent feature bits to be written into IA32_XSS. [ tglx: Massaged change log ] Fixes: f0dccc9da4c0 ("x86/fpu/xstate: Support dynamic supervisor feature for LBR") Suggested-by: Thomas Gleixner Signed-off-by: Mitchell Levy Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20240812-xsave-lbr-fix-v3-1-95bac1bf62f4@gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/fpu/types.h | 7 +++++++ arch/x86/kernel/fpu/xstate.c | 3 +++ arch/x86/kernel/fpu/xstate.h | 4 ++-- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index eb17f31b06d2..de16862bf230 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -591,6 +591,13 @@ struct fpu_state_config { * even without XSAVE support, i.e. 
legacy features FP + SSE */ u64 legacy_features; + /* + * @independent_features: + * + * Features that are supported by XSAVES, but not managed as part of + * the FPU core, such as LBR + */ + u64 independent_features; }; /* FPU state configuration information */ diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c5a026fee5e0..1339f8328db5 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -788,6 +788,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) goto out_disable; } + fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features & + XFEATURE_MASK_INDEPENDENT; + /* * Clear XSAVE features that are disabled in the normal CPUID. */ diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 05df04f39628..f2611145f3ca 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -62,9 +62,9 @@ static inline u64 xfeatures_mask_supervisor(void) static inline u64 xfeatures_mask_independent(void) { if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) - return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR; + return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR; - return XFEATURE_MASK_INDEPENDENT; + return fpu_kernel_cfg.independent_features; } /* XSAVE/XRSTOR wrapper functions */ From 82e3e968defb2a054fae7a010ebc387c7f7ec143 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Tue, 13 Aug 2024 09:48:27 +0800 Subject: [PATCH 071/374] x86/apic: Make x2apic_disable() work correctly commit 0ecc5be200c84e67114f3640064ba2bae3ba2f5a upstream. x2apic_disable() clears x2apic_state and x2apic_mode unconditionally, even when the state is X2APIC_ON_LOCKED, which prevents the kernel to disable it thereby creating inconsistent state. Due to the early state check for X2APIC_ON, the code path which warns about a locked X2APIC cannot be reached. Test for state < X2APIC_ON instead and move the clearing of the state and mode variables to the place which actually disables X2APIC. [ tglx: Massaged change log. Added Fixes tag. Moved clearing so it's at the right place for back ports ] Fixes: a57e456a7b28 ("x86/apic: Fix fallout from x2apic cleanup") Signed-off-by: Yuntao Wang Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20240813014827.895381-1-yuntao.wang@linux.dev Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/apic.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 66fd4b2a37a3..373638691cd4 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1775,12 +1775,9 @@ static __init void apic_set_fixmap(bool read_apic); static __init void x2apic_disable(void) { - u32 x2apic_id, state = x2apic_state; + u32 x2apic_id; - x2apic_mode = 0; - x2apic_state = X2APIC_DISABLED; - - if (state != X2APIC_ON) + if (x2apic_state < X2APIC_ON) return; x2apic_id = read_apic_id(); @@ -1793,6 +1790,10 @@ static __init void x2apic_disable(void) } __x2apic_disable(); + + x2apic_mode = 0; + x2apic_state = X2APIC_DISABLED; + /* * Don't reread the APIC ID as it was already done from * check_x2apic() and the APIC driver still is a x2APIC variant, From 3aa52bd439860ec5c6d8653e821efdaa48197cb2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 5 Sep 2024 14:24:38 -0400 Subject: [PATCH 072/374] Revert "drm/amdgpu: align pp_power_profile_mode with kernel docs" commit 1a8d845470941f1b6de1b392227530c097dc5e0c upstream. 
This reverts commit 8f614469de248a4bc55fb07e55d5f4c340c75b11. This breaks some manual setting of the profile mode in certain cases. Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3600 Signed-off-by: Alex Deucher (cherry picked from commit 7a199557643e993d4e7357860624b8aa5d8f4340) Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 95f690b70c05..45d053497f49 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2257,7 +2257,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, smu_dpm_ctx->dpm_level = level; } - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; @@ -2336,7 +2337,8 @@ static int smu_switch_power_profile(void *handle, workload[0] = smu->workload_setting[index]; } - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); return 0; From 930840f19ec961e4cf7ed869f86b0d955c0468c9 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Fri, 30 Aug 2024 15:34:19 +0800 Subject: [PATCH 073/374] Revert "wifi: ath11k: restore country code during resume" commit d3e154d7776ba57ab679fb816fb87b627fba21c9 upstream. This reverts commit 7f0343b7b8710436c1e6355c71782d32ada47e0c. We are going to revert commit 166a490f59ac ("wifi: ath11k: support hibernation"), on which this commit depends. With that commit reverted, this one is not needed any more, so revert this commit first. Signed-off-by: Baochen Qiang Acked-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240830073420.5790-2-quic_bqiang@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath11k/core.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 47554c361963..a14d0c65000a 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1009,16 +1009,6 @@ int ath11k_core_resume(struct ath11k_base *ab) return -ETIMEDOUT; } - if (ab->hw_params.current_cc_support && - ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { - ret = ath11k_reg_set_cc(ar); - if (ret) { - ath11k_warn(ab, "failed to set country code during resume: %d\n", - ret); - return ret; - } - } - ret = ath11k_dp_rx_pktlog_start(ab); if (ret) ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", From 436dd444d8b206bf34a2a35e1e04552c2ba326e9 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Fri, 30 Aug 2024 15:34:20 +0800 Subject: [PATCH 074/374] Revert "wifi: ath11k: support hibernation" commit 2f833e8948d6c88a3a257d4e426c9897b4907d5a upstream. This reverts commit 166a490f59ac10340ee5330e51c15188ce2a7f8f. There are several reports that this commit breaks system suspend on some specific Lenovo platforms. Since there is no fix available, for now revert this commit to make suspend work again on those platforms. 
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219196 Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2301921 Cc: # 6.10.x: d3e154d7776b: Revert "wifi: ath11k: restore country code during resume" Cc: # 6.10.x Signed-off-by: Baochen Qiang Acked-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240830073420.5790-3-quic_bqiang@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath11k/ahb.c | 4 +- drivers/net/wireless/ath/ath11k/core.c | 107 ++++++++----------------- drivers/net/wireless/ath/ath11k/core.h | 4 - drivers/net/wireless/ath/ath11k/hif.h | 12 +-- drivers/net/wireless/ath/ath11k/mhi.c | 12 +-- drivers/net/wireless/ath/ath11k/mhi.h | 3 +- drivers/net/wireless/ath/ath11k/pci.c | 44 ++-------- drivers/net/wireless/ath/ath11k/qmi.c | 2 +- 8 files changed, 49 insertions(+), 139 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index ca0f17ddebba..0360ee5f0b65 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab) return ret; } -static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend) +static void ath11k_ahb_power_down(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); @@ -1261,7 +1261,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev) struct ath11k_base *ab = platform_get_drvdata(pdev); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath11k_ahb_power_down(ab, false); + ath11k_ahb_power_down(ab); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index a14d0c65000a..0edfea68afad 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -906,6 +906,12 @@ int ath11k_core_suspend(struct ath11k_base *ab) return ret; } + ret = ath11k_wow_enable(ab); + if (ret) { + ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); + return ret; + } + ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", @@ -916,85 +922,29 @@ int ath11k_core_suspend(struct ath11k_base *ab) ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); - /* PM framework skips suspend_late/resume_early callbacks - * if other devices report errors in their suspend callbacks. - * However ath11k_core_resume() would still be called because - * here we return success thus kernel put us on dpm_suspended_list. - * Since we won't go through a power down/up cycle, there is - * no chance to call complete(&ab->restart_completed) in - * ath11k_core_restart(), making ath11k_core_resume() timeout. - * So call it here to avoid this issue. This also works in case - * no error happens thus suspend_late/resume_early get called, - * because it will be reinitialized in ath11k_core_resume_early(). - */ - complete(&ab->restart_completed); - - return 0; -} -EXPORT_SYMBOL(ath11k_core_suspend); - -int ath11k_core_suspend_late(struct ath11k_base *ab) -{ - struct ath11k_pdev *pdev; - struct ath11k *ar; - - if (!ab->hw_params.supports_suspend) - return -EOPNOTSUPP; - - /* so far single_pdev_only chips have supports_suspend as true - * and only the first pdev is valid. 
- */ - pdev = ath11k_core_get_single_pdev(ab); - ar = pdev->ar; - if (!ar || ar->state != ATH11K_STATE_OFF) - return 0; - ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); - ath11k_hif_power_down(ab, true); + ret = ath11k_hif_suspend(ab); + if (ret) { + ath11k_warn(ab, "failed to suspend hif: %d\n", ret); + return ret; + } return 0; } -EXPORT_SYMBOL(ath11k_core_suspend_late); - -int ath11k_core_resume_early(struct ath11k_base *ab) -{ - int ret; - struct ath11k_pdev *pdev; - struct ath11k *ar; - - if (!ab->hw_params.supports_suspend) - return -EOPNOTSUPP; - - /* so far single_pdev_only chips have supports_suspend as true - * and only the first pdev is valid. - */ - pdev = ath11k_core_get_single_pdev(ab); - ar = pdev->ar; - if (!ar || ar->state != ATH11K_STATE_OFF) - return 0; - - reinit_completion(&ab->restart_completed); - ret = ath11k_hif_power_up(ab); - if (ret) - ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret); - - return ret; -} -EXPORT_SYMBOL(ath11k_core_resume_early); +EXPORT_SYMBOL(ath11k_core_suspend); int ath11k_core_resume(struct ath11k_base *ab) { int ret; struct ath11k_pdev *pdev; struct ath11k *ar; - long time_left; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; - /* so far single_pdev_only chips have supports_suspend as true + /* so far signle_pdev_only chips have supports_suspend as true * and only the first pdev is valid. */ pdev = ath11k_core_get_single_pdev(ab); @@ -1002,19 +952,29 @@ int ath11k_core_resume(struct ath11k_base *ab) if (!ar || ar->state != ATH11K_STATE_OFF) return 0; - time_left = wait_for_completion_timeout(&ab->restart_completed, - ATH11K_RESET_TIMEOUT_HZ); - if (time_left == 0) { - ath11k_warn(ab, "timeout while waiting for restart complete"); - return -ETIMEDOUT; + ret = ath11k_hif_resume(ab); + if (ret) { + ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); + return ret; } + ath11k_hif_ce_irq_enable(ab); + ath11k_hif_irq_enable(ab); + ret = ath11k_dp_rx_pktlog_start(ab); - if (ret) + if (ret) { ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); + return ret; + } - return ret; + ret = ath11k_wow_wakeup(ab); + if (ret) { + ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret); + return ret; + } + + return 0; } EXPORT_SYMBOL(ath11k_core_resume); @@ -2109,8 +2069,6 @@ static void ath11k_core_restart(struct work_struct *work) if (!ab->is_reset) ath11k_core_post_reconfigure_recovery(ab); - - complete(&ab->restart_completed); } static void ath11k_core_reset(struct work_struct *work) @@ -2180,7 +2138,7 @@ static void ath11k_core_reset(struct work_struct *work) ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); - ath11k_hif_power_down(ab, false); + ath11k_hif_power_down(ab); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); @@ -2253,7 +2211,7 @@ void ath11k_core_deinit(struct ath11k_base *ab) mutex_unlock(&ab->core_lock); - ath11k_hif_power_down(ab, false); + ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); ath11k_fw_destroy(ab); @@ -2306,7 +2264,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); - init_completion(&ab->restart_completed); ab->dev = dev; ab->hif.bus = bus; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 205f40ee6b66..141ba4487cb4 100644 --- 
a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -1033,8 +1033,6 @@ struct ath11k_base { DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT); } fw; - struct completion restart_completed; - #ifdef CONFIG_NL80211_TESTMODE struct { u32 data_pos; @@ -1234,10 +1232,8 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_check_dt(struct ath11k_base *ath11k); int ath11k_core_check_smbios(struct ath11k_base *ab); void ath11k_core_halt(struct ath11k *ar); -int ath11k_core_resume_early(struct ath11k_base *ab); int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab); -int ath11k_core_suspend_late(struct ath11k_base *ab); void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab); bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index c4c6cc09c7c1..674ff772b181 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -18,7 +18,7 @@ struct ath11k_hif_ops { int (*start)(struct ath11k_base *ab); void (*stop)(struct ath11k_base *ab); int (*power_up)(struct ath11k_base *ab); - void (*power_down)(struct ath11k_base *ab, bool is_suspend); + void (*power_down)(struct ath11k_base *ab); int (*suspend)(struct ath11k_base *ab); int (*resume)(struct ath11k_base *ab); int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id, @@ -67,18 +67,12 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab) static inline int ath11k_hif_power_up(struct ath11k_base *ab) { - if (!ab->hif.ops->power_up) - return -EOPNOTSUPP; - return ab->hif.ops->power_up(ab); } -static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend) +static inline void ath11k_hif_power_down(struct ath11k_base *ab) { - if (!ab->hif.ops->power_down) - return; - - ab->hif.ops->power_down(ab, is_suspend); + ab->hif.ops->power_down(ab); } static inline int ath11k_hif_suspend(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index ab182690aed3..6974a551883f 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -453,17 +453,9 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci) return 0; } -void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend) +void ath11k_mhi_stop(struct ath11k_pci *ab_pci) { - /* During suspend we need to use mhi_power_down_keep_dev() - * workaround, otherwise ath11k_core_resume() will timeout - * during resume. 
- */ - if (is_suspend) - mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true); - else - mhi_power_down(ab_pci->mhi_ctrl, true); - + mhi_power_down(ab_pci->mhi_ctrl, true); mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); } diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index 2d567705e732..a682aad52fc5 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -18,7 +18,7 @@ #define MHICTRL_RESET_MASK 0x2 int ath11k_mhi_start(struct ath11k_pci *ar_pci); -void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend); +void ath11k_mhi_stop(struct ath11k_pci *ar_pci); int ath11k_mhi_register(struct ath11k_pci *ar_pci); void ath11k_mhi_unregister(struct ath11k_pci *ar_pci); void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab); @@ -26,4 +26,5 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab); int ath11k_mhi_suspend(struct ath11k_pci *ar_pci); int ath11k_mhi_resume(struct ath11k_pci *ar_pci); + #endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 8d63b84d1261..be9d2c69cc41 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -638,7 +638,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) return 0; } -static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend) +static void ath11k_pci_power_down(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -649,7 +649,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend) ath11k_pci_msi_disable(ab_pci); - ath11k_mhi_stop(ab_pci, is_suspend); + ath11k_mhi_stop(ab_pci); clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, false); } @@ -970,7 +970,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath11k_pci_power_down(ab, false); + ath11k_pci_power_down(ab); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; @@ -998,7 +998,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); - ath11k_pci_power_down(ab, false); + ath11k_pci_power_down(ab); } static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) @@ -1035,39 +1035,9 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) return ret; } -static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev) -{ - struct ath11k_base *ab = dev_get_drvdata(dev); - int ret; - - ret = ath11k_core_suspend_late(ab); - if (ret) - ath11k_warn(ab, "failed to late suspend core: %d\n", ret); - - /* Similar to ath11k_pci_pm_suspend(), we return success here - * even error happens, to allow system suspend/hibernation survive. 
- */ - return 0; -} - -static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev) -{ - struct ath11k_base *ab = dev_get_drvdata(dev); - int ret; - - ret = ath11k_core_resume_early(ab); - if (ret) - ath11k_warn(ab, "failed to early resume core: %d\n", ret); - - return ret; -} - -static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend, - ath11k_pci_pm_resume) - SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late, - ath11k_pci_pm_resume_early) -}; +static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops, + ath11k_pci_pm_suspend, + ath11k_pci_pm_resume); static struct pci_driver ath11k_pci_driver = { .name = "ath11k_pci", diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index aa160e6fe24f..e0d052f186b2 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2877,7 +2877,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) } /* reset the firmware */ - ath11k_hif_power_down(ab, false); + ath11k_hif_power_down(ab); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n"); return 0; From 126d72b726c4cf1119f3a7fe413a78d341c3fea9 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 20 Aug 2024 20:07:44 -0700 Subject: [PATCH 075/374] tcp_bpf: fix return value of tcp_bpf_sendmsg() commit fe1910f9337bd46a9343967b547ccab26b4b2c6e upstream. When we cork messages in psock->cork, the last message triggers the flushing will result in sending a sk_msg larger than the current message size. In this case, in tcp_bpf_send_verdict(), 'copied' becomes negative at least in the following case: 468 case __SK_DROP: 469 default: 470 sk_msg_free_partial(sk, msg, tosend); 471 sk_msg_apply_bytes(psock, tosend); 472 *copied -= (tosend + delta); // <==== HERE 473 return -EACCES; Therefore, it could lead to the following BUG with a proper value of 'copied' (thanks to syzbot). We should not use negative 'copied' as a return value here. ------------[ cut here ]------------ kernel BUG at net/socket.c:733! 
Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP Modules linked in: CPU: 0 UID: 0 PID: 3265 Comm: syz-executor510 Not tainted 6.11.0-rc3-syzkaller-00060-gd07b43284ab3 #0 Hardware name: linux,dummy-virt (DT) pstate: 61400009 (nZCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--) pc : sock_sendmsg_nosec net/socket.c:733 [inline] pc : sock_sendmsg_nosec net/socket.c:728 [inline] pc : __sock_sendmsg+0x5c/0x60 net/socket.c:745 lr : sock_sendmsg_nosec net/socket.c:730 [inline] lr : __sock_sendmsg+0x54/0x60 net/socket.c:745 sp : ffff800088ea3b30 x29: ffff800088ea3b30 x28: fbf00000062bc900 x27: 0000000000000000 x26: ffff800088ea3bc0 x25: ffff800088ea3bc0 x24: 0000000000000000 x23: f9f00000048dc000 x22: 0000000000000000 x21: ffff800088ea3d90 x20: f9f00000048dc000 x19: ffff800088ea3d90 x18: 0000000000000001 x17: 0000000000000000 x16: 0000000000000000 x15: 000000002002ffaf x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 x11: 0000000000000000 x10: ffff8000815849c0 x9 : ffff8000815b49c0 x8 : 0000000000000000 x7 : 000000000000003f x6 : 0000000000000000 x5 : 00000000000007e0 x4 : fff07ffffd239000 x3 : fbf00000062bc900 x2 : 0000000000000000 x1 : 0000000000000000 x0 : 00000000fffffdef Call trace: sock_sendmsg_nosec net/socket.c:733 [inline] __sock_sendmsg+0x5c/0x60 net/socket.c:745 ____sys_sendmsg+0x274/0x2ac net/socket.c:2597 ___sys_sendmsg+0xac/0x100 net/socket.c:2651 __sys_sendmsg+0x84/0xe0 net/socket.c:2680 __do_sys_sendmsg net/socket.c:2689 [inline] __se_sys_sendmsg net/socket.c:2687 [inline] __arm64_sys_sendmsg+0x24/0x30 net/socket.c:2687 __invoke_syscall arch/arm64/kernel/syscall.c:35 [inline] invoke_syscall+0x48/0x110 arch/arm64/kernel/syscall.c:49 el0_svc_common.constprop.0+0x40/0xe0 arch/arm64/kernel/syscall.c:132 do_el0_svc+0x1c/0x28 arch/arm64/kernel/syscall.c:151 el0_svc+0x34/0xec arch/arm64/kernel/entry-common.c:712 el0t_64_sync_handler+0x100/0x12c arch/arm64/kernel/entry-common.c:730 el0t_64_sync+0x19c/0x1a0 arch/arm64/kernel/entry.S:598 Code: f9404463 d63f0060 3108441f 54fffe81 (d4210000) ---[ end trace 0000000000000000 ]--- Fixes: 4f738adba30a ("bpf: create tcp_bpf_ulp allowing BPF to monitor socket TX/RX data") Reported-by: syzbot+58c03971700330ce14d8@syzkaller.appspotmail.com Cc: Jakub Sitnicki Signed-off-by: Cong Wang Reviewed-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://patch.msgid.link/20240821030744.320934-1-xiyou.wangcong@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 53b0d62fd2c2..fe6178715ba0 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -577,7 +577,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) err = sk_stream_error(sk, msg->msg_flags, err); release_sock(sk); sk_psock_put(sk, psock); - return copied ? copied : err; + return copied > 0 ? copied : err; } enum { From 47abd8adddbc0aecb8f231269ef659148d5dabe4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 4 Sep 2024 14:44:18 +0000 Subject: [PATCH 076/374] ila: call nf_unregister_net_hooks() sooner commit 031ae72825cef43e4650140b800ad58bf7a6a466 upstream. syzbot found an use-after-free Read in ila_nf_input [1] Issue here is that ila_xlat_exit_net() frees the rhashtable, then call nf_unregister_net_hooks(). It should be done in the reverse way, with a synchronize_rcu(). This is a good match for a pre_exit() method. 
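The shape of the fix, condensed into one place here (the actual diff further below splits it between ila_main.c and ila_xlat.c): hook unregistration moves into a ->pre_exit() handler, and the pernet cleanup core runs all ->pre_exit() handlers, then a synchronize_rcu(), and only then the ->exit() handlers that free the rhashtable, so no ila_nf_input() invocation can still be walking the table when it goes away:

    static __net_exit void ila_pre_exit_net(struct net *net)
    {
            struct ila_net *ilan = net_generic(net, ila_net_id);

            if (ilan->xlat.hooks_registered)
                    nf_unregister_net_hooks(net, ila_nf_hook_ops,
                                            ARRAY_SIZE(ila_nf_hook_ops));
    }

    static struct pernet_operations ila_net_ops = {
            .init     = ila_init_net,
            .pre_exit = ila_pre_exit_net,   /* unregister hooks first */
            .exit     = ila_exit_net,       /* then free the rhashtable */
            .id       = &ila_net_id,
            .size     = sizeof(struct ila_net),
    };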
[1] BUG: KASAN: use-after-free in rht_key_hashfn include/linux/rhashtable.h:159 [inline] BUG: KASAN: use-after-free in __rhashtable_lookup include/linux/rhashtable.h:604 [inline] BUG: KASAN: use-after-free in rhashtable_lookup include/linux/rhashtable.h:646 [inline] BUG: KASAN: use-after-free in rhashtable_lookup_fast+0x77a/0x9b0 include/linux/rhashtable.h:672 Read of size 4 at addr ffff888064620008 by task ksoftirqd/0/16 CPU: 0 UID: 0 PID: 16 Comm: ksoftirqd/0 Not tainted 6.11.0-rc4-syzkaller-00238-g2ad6d23f465a #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/06/2024 Call Trace: __dump_stack lib/dump_stack.c:93 [inline] dump_stack_lvl+0x241/0x360 lib/dump_stack.c:119 print_address_description mm/kasan/report.c:377 [inline] print_report+0x169/0x550 mm/kasan/report.c:488 kasan_report+0x143/0x180 mm/kasan/report.c:601 rht_key_hashfn include/linux/rhashtable.h:159 [inline] __rhashtable_lookup include/linux/rhashtable.h:604 [inline] rhashtable_lookup include/linux/rhashtable.h:646 [inline] rhashtable_lookup_fast+0x77a/0x9b0 include/linux/rhashtable.h:672 ila_lookup_wildcards net/ipv6/ila/ila_xlat.c:132 [inline] ila_xlat_addr net/ipv6/ila/ila_xlat.c:652 [inline] ila_nf_input+0x1fe/0x3c0 net/ipv6/ila/ila_xlat.c:190 nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline] nf_hook_slow+0xc3/0x220 net/netfilter/core.c:626 nf_hook include/linux/netfilter.h:269 [inline] NF_HOOK+0x29e/0x450 include/linux/netfilter.h:312 __netif_receive_skb_one_core net/core/dev.c:5661 [inline] __netif_receive_skb+0x1ea/0x650 net/core/dev.c:5775 process_backlog+0x662/0x15b0 net/core/dev.c:6108 __napi_poll+0xcb/0x490 net/core/dev.c:6772 napi_poll net/core/dev.c:6841 [inline] net_rx_action+0x89b/0x1240 net/core/dev.c:6963 handle_softirqs+0x2c4/0x970 kernel/softirq.c:554 run_ksoftirqd+0xca/0x130 kernel/softirq.c:928 smpboot_thread_fn+0x544/0xa30 kernel/smpboot.c:164 kthread+0x2f0/0x390 kernel/kthread.c:389 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 The buggy address belongs to the physical page: page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x64620 flags: 0xfff00000000000(node=0|zone=1|lastcpupid=0x7ff) page_type: 0xbfffffff(buddy) raw: 00fff00000000000 ffffea0000959608 ffffea00019d9408 0000000000000000 raw: 0000000000000000 0000000000000003 00000000bfffffff 0000000000000000 page dumped because: kasan: bad access detected page_owner tracks the page as freed page last allocated via order 3, migratetype Unmovable, gfp_mask 0x52dc0(GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_ZERO), pid 5242, tgid 5242 (syz-executor), ts 73611328570, free_ts 618981657187 set_page_owner include/linux/page_owner.h:32 [inline] post_alloc_hook+0x1f3/0x230 mm/page_alloc.c:1493 prep_new_page mm/page_alloc.c:1501 [inline] get_page_from_freelist+0x2e4c/0x2f10 mm/page_alloc.c:3439 __alloc_pages_noprof+0x256/0x6c0 mm/page_alloc.c:4695 __alloc_pages_node_noprof include/linux/gfp.h:269 [inline] alloc_pages_node_noprof include/linux/gfp.h:296 [inline] ___kmalloc_large_node+0x8b/0x1d0 mm/slub.c:4103 __kmalloc_large_node_noprof+0x1a/0x80 mm/slub.c:4130 __do_kmalloc_node mm/slub.c:4146 [inline] __kmalloc_node_noprof+0x2d2/0x440 mm/slub.c:4164 __kvmalloc_node_noprof+0x72/0x190 mm/util.c:650 bucket_table_alloc lib/rhashtable.c:186 [inline] rhashtable_init_noprof+0x534/0xa60 lib/rhashtable.c:1071 ila_xlat_init_net+0xa0/0x110 net/ipv6/ila/ila_xlat.c:613 ops_init+0x359/0x610 net/core/net_namespace.c:139 setup_net+0x515/0xca0 
net/core/net_namespace.c:343 copy_net_ns+0x4e2/0x7b0 net/core/net_namespace.c:508 create_new_namespaces+0x425/0x7b0 kernel/nsproxy.c:110 unshare_nsproxy_namespaces+0x124/0x180 kernel/nsproxy.c:228 ksys_unshare+0x619/0xc10 kernel/fork.c:3328 __do_sys_unshare kernel/fork.c:3399 [inline] __se_sys_unshare kernel/fork.c:3397 [inline] __x64_sys_unshare+0x38/0x40 kernel/fork.c:3397 page last free pid 11846 tgid 11846 stack trace: reset_page_owner include/linux/page_owner.h:25 [inline] free_pages_prepare mm/page_alloc.c:1094 [inline] free_unref_page+0xd22/0xea0 mm/page_alloc.c:2612 __folio_put+0x2c8/0x440 mm/swap.c:128 folio_put include/linux/mm.h:1486 [inline] free_large_kmalloc+0x105/0x1c0 mm/slub.c:4565 kfree+0x1c4/0x360 mm/slub.c:4588 rhashtable_free_and_destroy+0x7c6/0x920 lib/rhashtable.c:1169 ila_xlat_exit_net+0x55/0x110 net/ipv6/ila/ila_xlat.c:626 ops_exit_list net/core/net_namespace.c:173 [inline] cleanup_net+0x802/0xcc0 net/core/net_namespace.c:640 process_one_work kernel/workqueue.c:3231 [inline] process_scheduled_works+0xa2c/0x1830 kernel/workqueue.c:3312 worker_thread+0x86d/0xd40 kernel/workqueue.c:3390 kthread+0x2f0/0x390 kernel/kthread.c:389 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 Memory state around the buggy address: ffff88806461ff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff88806461ff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff888064620000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ^ ffff888064620080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff888064620100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff Fixes: 7f00feaf1076 ("ila: Add generic ILA translation facility") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Tom Herbert Reviewed-by: Florian Westphal Link: https://patch.msgid.link/20240904144418.1162839-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ila/ila.h | 1 + net/ipv6/ila/ila_main.c | 6 ++++++ net/ipv6/ila/ila_xlat.c | 13 +++++++++---- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index ad5f6f6ba333..85b92917849b 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -108,6 +108,7 @@ int ila_lwt_init(void); void ila_lwt_fini(void); int ila_xlat_init_net(struct net *net); +void ila_xlat_pre_exit_net(struct net *net); void ila_xlat_exit_net(struct net *net); int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info); diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c index 69caed07315f..976c78efbae1 100644 --- a/net/ipv6/ila/ila_main.c +++ b/net/ipv6/ila/ila_main.c @@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net) return err; } +static __net_exit void ila_pre_exit_net(struct net *net) +{ + ila_xlat_pre_exit_net(net); +} + static __net_exit void ila_exit_net(struct net *net) { ila_xlat_exit_net(net); @@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net) static struct pernet_operations ila_net_ops = { .init = ila_init_net, + .pre_exit = ila_pre_exit_net, .exit = ila_exit_net, .id = &ila_net_id, .size = sizeof(struct ila_net), diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 67e8c9440977..534a4498e280 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -619,6 +619,15 @@ int ila_xlat_init_net(struct net *net) return 0; } +void ila_xlat_pre_exit_net(struct net *net) +{ + struct ila_net *ilan = net_generic(net, ila_net_id); + + if 
(ilan->xlat.hooks_registered) + nf_unregister_net_hooks(net, ila_nf_hook_ops, + ARRAY_SIZE(ila_nf_hook_ops)); +} + void ila_xlat_exit_net(struct net *net) { struct ila_net *ilan = net_generic(net, ila_net_id); @@ -626,10 +635,6 @@ void ila_xlat_exit_net(struct net *net) rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL); free_bucket_spinlocks(ilan->xlat.locks); - - if (ilan->xlat.hooks_registered) - nf_unregister_net_hooks(net, ila_nf_hook_ops, - ARRAY_SIZE(ila_nf_hook_ops)); } static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) From d7c01c0714c04431b5e18cf17a9ea68a553d1c3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Tue, 3 Sep 2024 18:08:45 +0200 Subject: [PATCH 077/374] sched: sch_cake: fix bulk flow accounting logic for host fairness MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 546ea84d07e3e324644025e2aae2d12ea4c5896e upstream. In sch_cake, we keep track of the count of active bulk flows per host, when running in dst/src host fairness mode, which is used as the round-robin weight when iterating through flows. The count of active bulk flows is updated whenever a flow changes state. This has a peculiar interaction with the hash collision handling: when a hash collision occurs (after the set-associative hashing), the state of the hash bucket is simply updated to match the new packet that collided, and if host fairness is enabled, that also means assigning new per-host state to the flow. For this reason, the bulk flow counters of the host(s) assigned to the flow are decremented, before new state is assigned (and the counters, which may not belong to the same host anymore, are incremented again). Back when this code was introduced, the host fairness mode was always enabled, so the decrement was unconditional. When the configuration flags were introduced the *increment* was made conditional, but the *decrement* was not. Which of course can lead to a spurious decrement (and associated wrap-around to U16_MAX). AFAICT, when host fairness is disabled, the decrement and wrap-around happens as soon as a hash collision occurs (which is not that common in itself, due to the set-associative hashing). However, in most cases this is harmless, as the value is only used when host fairness mode is enabled. So in order to trigger an array overflow, sch_cake has to first be configured with host fairness disabled, and while running in this mode, a hash collision has to occur to cause the overflow. Then, the qdisc has to be reconfigured to enable host fairness, which leads to the array out-of-bounds because the wrapped-around value is retained and used as an array index. It seems that syzbot managed to trigger this, which is quite impressive in its own right. This patch fixes the issue by introducing the same conditional check on decrement as is used on increment. The original bug predates the upstreaming of cake, but the commit listed in the Fixes tag touched that code, meaning that this patch won't apply before that. 
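As a minimal standalone illustration of the wrap-around described above (plain userspace C, identifiers are not from sch_cake): decrementing an unsigned 16-bit counter that is already zero leaves it at U16_MAX, which is exactly the stale value that later triggers the out-of-bounds access once it is reused as an array index.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint16_t bulk_flow_count = 0;    /* host currently has no bulk flows */

          bulk_flow_count--;               /* spurious decrement on a hash collision */
          printf("%u\n", bulk_flow_count); /* prints 65535, i.e. U16_MAX */
          return 0;
  }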
Fixes: 712639929912 ("sch_cake: Make the dual modes fairer") Reported-by: syzbot+7fe7b81d602cc1e6b94d@syzkaller.appspotmail.com Signed-off-by: Toke Høiland-Jørgensen Link: https://patch.msgid.link/20240903160846.20909-1-toke@redhat.com Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_cake.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 9602dafe32e6..d2f49db70523 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -786,12 +786,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, * queue, accept the collision, update the host tags. */ q->way_collisions++; - if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { - q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; - q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; - } allocate_src = cake_dsrc(flow_mode); allocate_dst = cake_ddst(flow_mode); + + if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { + if (allocate_src) + q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; + if (allocate_dst) + q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; + } found: /* reserve queue for future packets in same flow */ reduced_hash = outer_hash + k; From 1cf1f7e8cd47244fa947d357ef1f642d91e219a3 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Aug 2024 15:52:42 +0900 Subject: [PATCH 078/374] nilfs2: fix missing cleanup on rollforward recovery error commit 5787fcaab9eb5930f5378d6a1dd03d916d146622 upstream. In an error injection test of a routine for mount-time recovery, KASAN found a use-after-free bug. It turned out that if data recovery was performed using partial logs created by dsync writes, but an error occurred before starting the log writer to create a recovered checkpoint, the inodes whose data had been recovered were left in the ns_dirty_files list of the nilfs object and were not freed. Fix this issue by cleaning up inodes that have read the recovery data if the recovery routine fails midway before the log writer starts. 
Link: https://lkml.kernel.org/r/20240810065242.3701-1-konishi.ryusuke@gmail.com Fixes: 0f3e1c7f23f8 ("nilfs2: recovery functions") Signed-off-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/recovery.c | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index b638dc06df2f..61e25a980f73 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -715,6 +715,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, brelse(bh); } +/** + * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery + * @nilfs: nilfs object + */ +static void nilfs_abort_roll_forward(struct the_nilfs *nilfs) +{ + struct nilfs_inode_info *ii, *n; + LIST_HEAD(head); + + /* Abandon inodes that have read recovery data */ + spin_lock(&nilfs->ns_inode_lock); + list_splice_init(&nilfs->ns_dirty_files, &head); + spin_unlock(&nilfs->ns_inode_lock); + if (list_empty(&head)) + return; + + set_nilfs_purging(nilfs); + list_for_each_entry_safe(ii, n, &head, i_dirty) { + spin_lock(&nilfs->ns_inode_lock); + list_del_init(&ii->i_dirty); + spin_unlock(&nilfs->ns_inode_lock); + + iput(&ii->vfs_inode); + } + clear_nilfs_purging(nilfs); +} + /** * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint * @nilfs: nilfs object @@ -773,15 +800,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, if (unlikely(err)) { nilfs_err(sb, "error %d writing segment for recovery", err); - goto failed; + goto put_root; } nilfs_finish_roll_forward(nilfs, ri); } - failed: +put_root: nilfs_put_root(root); return err; + +failed: + nilfs_abort_roll_forward(nilfs); + goto put_root; } /** From 962562d4c70c5cdeb4e955d63ff2017c4eca1aad Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 11 Aug 2024 19:03:20 +0900 Subject: [PATCH 079/374] nilfs2: protect references to superblock parameters exposed in sysfs commit 683408258917541bdb294cd717c210a04381931e upstream. The superblock buffers of nilfs2 can not only be overwritten at runtime for modifications/repairs, but they are also regularly swapped, replaced during resizing, and even abandoned when degrading to one side due to backing device issues. So, accessing them requires mutual exclusion using the reader/writer semaphore "nilfs->ns_sem". Some sysfs attribute show methods read this superblock buffer without the necessary mutual exclusion, which can cause problems with pointer dereferencing and memory access, so fix it. 
Link: https://lkml.kernel.org/r/20240811100320.9913-1-konishi.ryusuke@gmail.com Fixes: da7141fb78db ("nilfs2: add /sys/fs/nilfs2/ group") Signed-off-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/sysfs.c | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 379d22e28ed6..905c7eadf967 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -836,9 +836,15 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { - struct nilfs_super_block **sbp = nilfs->ns_sbp; - u32 major = le32_to_cpu(sbp[0]->s_rev_level); - u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level); + struct nilfs_super_block *raw_sb; + u32 major; + u16 minor; + + down_read(&nilfs->ns_sem); + raw_sb = nilfs->ns_sbp[0]; + major = le32_to_cpu(raw_sb->s_rev_level); + minor = le16_to_cpu(raw_sb->s_minor_rev_level); + up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%d.%d\n", major, minor); } @@ -856,8 +862,13 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { - struct nilfs_super_block **sbp = nilfs->ns_sbp; - u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size); + struct nilfs_super_block *raw_sb; + u64 dev_size; + + down_read(&nilfs->ns_sem); + raw_sb = nilfs->ns_sbp[0]; + dev_size = le64_to_cpu(raw_sb->s_dev_size); + up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%llu\n", dev_size); } @@ -879,9 +890,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { - struct nilfs_super_block **sbp = nilfs->ns_sbp; + struct nilfs_super_block *raw_sb; + ssize_t len; - return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid); + down_read(&nilfs->ns_sem); + raw_sb = nilfs->ns_sbp[0]; + len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid); + up_read(&nilfs->ns_sem); + + return len; } static @@ -889,10 +906,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { - struct nilfs_super_block **sbp = nilfs->ns_sbp; + struct nilfs_super_block *raw_sb; + ssize_t len; + + down_read(&nilfs->ns_sem); + raw_sb = nilfs->ns_sbp[0]; + len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n", + raw_sb->s_volume_name); + up_read(&nilfs->ns_sem); - return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n", - sbp[0]->s_volume_name); + return len; } static const char dev_readme_str[] = From 74866c16ea2183f52925fa5d76061a1fe7f7737b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Wed, 14 Aug 2024 19:11:19 +0900 Subject: [PATCH 080/374] nilfs2: fix state management in error path of log writing function commit 6576dd6695f2afca3f4954029ac4a64f82ba60ab upstream. After commit a694291a6211 ("nilfs2: separate wait function from nilfs_segctor_write") was applied, the log writing function nilfs_segctor_do_construct() was able to issue I/O requests continuously even if user data blocks were split into multiple logs across segments, but two potential flaws were introduced in its error handling. First, if nilfs_segctor_begin_construction() fails while creating the second or subsequent logs, the log writing function returns without calling nilfs_segctor_abort_construction(), so the writeback flag set on pages/folios will remain uncleared. This causes page cache operations to hang waiting for the writeback flag. For example, truncate_inode_pages_final(), which is called via nilfs_evict_inode() when an inode is evicted from memory, will hang. 
Second, the NILFS_I_COLLECTED flag set on normal inodes remain uncleared. As a result, if the next log write involves checkpoint creation, that's fine, but if a partial log write is performed that does not, inodes with NILFS_I_COLLECTED set are erroneously removed from the "sc_dirty_files" list, and their data and b-tree blocks may not be written to the device, corrupting the block mapping. Fix these issues by uniformly calling nilfs_segctor_abort_construction() on failure of each step in the loop in nilfs_segctor_do_construct(), having it clean up logs and segment usages according to progress, and correcting the conditions for calling nilfs_redirty_inodes() to ensure that the NILFS_I_COLLECTED flag is cleared. Link: https://lkml.kernel.org/r/20240814101119.4070-1-konishi.ryusuke@gmail.com Fixes: a694291a6211 ("nilfs2: separate wait function from nilfs_segctor_write") Signed-off-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segment.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index d02fd92cdb43..f090f19df3e3 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1788,6 +1788,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, nilfs_abort_logs(&logs, ret ? : err); list_splice_tail_init(&sci->sc_segbufs, &logs); + if (list_empty(&logs)) + return; /* if the first segment buffer preparation failed */ + nilfs_cancel_segusage(&logs, nilfs->ns_sufile); nilfs_free_incomplete_logs(&logs, nilfs); @@ -2032,7 +2035,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) err = nilfs_segctor_begin_construction(sci, nilfs); if (unlikely(err)) - goto out; + goto failed; /* Update time stamp */ sci->sc_seg_ctime = ktime_get_real_seconds(); @@ -2099,10 +2102,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) return err; failed_to_write: - if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) - nilfs_redirty_inodes(&sci->sc_dirty_files); - failed: + if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE) + nilfs_redirty_inodes(&sci->sc_dirty_files); if (nilfs_doing_gc()) nilfs_redirty_inodes(&sci->sc_gc_inodes); nilfs_segctor_abort_construction(sci, nilfs, err); From c3ae6e7b970d7445a277fd7ffff6e12c18f725d0 Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Wed, 28 Aug 2024 19:14:11 +0300 Subject: [PATCH 081/374] btrfs: qgroup: don't use extent changeset when not needed commit c346c629765ab982967017e2ae859156d0e235cf upstream. The local extent changeset is passed to clear_record_extent_bits() where it may have some additional memory dynamically allocated for ulist. When qgroup is disabled, the memory is leaked because in this case the changeset is not released upon __btrfs_qgroup_release_data() return. Since the recorded contents of the changeset are not used thereafter, just don't pass it. Found by Linux Verification Center (linuxtesting.org) with Syzkaller. 
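As a rough sketch of the ownership rule behind the fix (illustrative fragment only, not buildable on its own; extent_changeset_release() is assumed to be the usual counterpart of extent_changeset_init()): a changeset handed to clear_record_extent_bits() may end up owning dynamically allocated ulist entries, so it either has to be released afterwards or, since nothing reads the recorded ranges in the disabled-qgroup path, no changeset should be passed at all.

  /* Illustrative only. */
  static int example_clear(struct extent_io_tree *io_tree, u64 start, u64 len)
  {
          struct extent_changeset changeset;
          int ret;

          extent_changeset_init(&changeset);
          ret = clear_record_extent_bits(io_tree, start, start + len - 1,
                                         EXTENT_QGROUP_RESERVED, &changeset);
          /*
           * clear_record_extent_bits() may have added entries to the ulist
           * embedded in 'changeset'; they must be freed even if the caller
           * never looks at them:
           */
          extent_changeset_release(&changeset);
          return ret;
  }

Passing NULL instead of &changeset, as done below, sidesteps the allocation entirely.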
Reported-by: syzbot+81670362c283f3dd889c@syzkaller.appspotmail.com Closes: https://lore.kernel.org/lkml/000000000000aa8c0c060ade165e@google.com Fixes: af0e2aab3b70 ("btrfs: qgroup: flush reservations during quota disable") CC: stable@vger.kernel.org # 6.10+ Reviewed-by: Boris Burkov Reviewed-by: Qu Wenruo Signed-off-by: Fedor Pchelkin Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/qgroup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 3faf2181d1ee..d4486518414d 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -4269,10 +4269,9 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, int ret; if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { - extent_changeset_init(&changeset); return clear_record_extent_bits(&inode->io_tree, start, start + len - 1, - EXTENT_QGROUP_RESERVED, &changeset); + EXTENT_QGROUP_RESERVED, NULL); } /* In release case, we shouldn't have @reserved */ From e91dab550dd1d2221333cac9f5c012ab5193696f Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Sat, 31 Aug 2024 01:32:49 +0900 Subject: [PATCH 082/374] btrfs: zoned: handle broken write pointer on zones commit b1934cd6069538db2255dc94ba573771ecf3b560 upstream. Btrfs rejects to mount a FS if it finds a block group with a broken write pointer (e.g, unequal write pointers on two zones of RAID1 block group). Since such case can happen easily with a power-loss or crash of a system, we need to handle the case more gently. Handle such block group by making it unallocatable, so that there will be no writes into it. That can be done by setting the allocation pointer at the end of allocating region (= block_group->zone_capacity). Then, existing code handle zone_unusable properly. Having proper zone_capacity is necessary for the change. So, set it as fast as possible. We cannot handle RAID0 and RAID10 case like this. But, they are anyway unable to read because of a missing stripe. Fixes: 265f7237dd25 ("btrfs: zoned: allow DUP on meta-data block groups") Fixes: 568220fa9657 ("btrfs: zoned: support RAID0/1/10 on top of raid stripe tree") CC: stable@vger.kernel.org # 6.1+ Reported-by: HAN Yuwei Cc: Xuefer Signed-off-by: Naohiro Aota Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/zoned.c | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 947a87576f6c..35829a1a2238 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1408,6 +1408,8 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg, return -EINVAL; } + bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity); + if (zone_info[0].alloc_offset == WP_MISSING_DEV) { btrfs_err(bg->fs_info, "zoned: cannot recover write pointer for zone %llu", @@ -1434,7 +1436,6 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg, } bg->alloc_offset = zone_info[0].alloc_offset; - bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity); return 0; } @@ -1452,6 +1453,9 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg, return -EINVAL; } + /* In case a device is missing we have a cap of 0, so don't use it. 
*/ + bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity); + for (i = 0; i < map->num_stripes; i++) { if (zone_info[i].alloc_offset == WP_MISSING_DEV || zone_info[i].alloc_offset == WP_CONVENTIONAL) @@ -1473,9 +1477,6 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg, if (test_bit(0, active)) set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); } - /* In case a device is missing we have a cap of 0, so don't use it. */ - bg->zone_capacity = min_not_zero(zone_info[0].capacity, - zone_info[1].capacity); } if (zone_info[0].alloc_offset != WP_MISSING_DEV) @@ -1565,6 +1566,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) unsigned long *active = NULL; u64 last_alloc = 0; u32 num_sequential = 0, num_conventional = 0; + u64 profile; if (!btrfs_is_zoned(fs_info)) return 0; @@ -1625,7 +1627,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) } } - switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { + profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; + switch (profile) { case 0: /* single */ ret = btrfs_load_block_group_single(cache, &zone_info[0], active); break; @@ -1652,6 +1655,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) goto out; } + if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 && + profile != BTRFS_BLOCK_GROUP_RAID10) { + /* + * Detected broken write pointer. Make this block group + * unallocatable by setting the allocation pointer at the end of + * allocatable region. Relocating this block group will fix the + * mismatch. + * + * Currently, we cannot handle RAID0 or RAID10 case like this + * because we don't have a proper zone_capacity value. But, + * reading from this block group won't work anyway by a missing + * stripe. + */ + cache->alloc_offset = cache->zone_capacity; + ret = 0; + } + out: /* Reject non SINGLE data profiles without RST */ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && From 966fe02a285da56ed54ab4a21e85dbac2ff5179b Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 28 Aug 2024 14:51:54 -0700 Subject: [PATCH 083/374] drm/xe/gsc: Do not attempt to load the GSC multiple times commit 529bf8d1118bbaa1aa835563a22b0b5c64ca9d68 upstream. The GSC HW is only reset by driver FLR or D3cold entry. We don't support the former at runtime, while the latter is only supported on DGFX, for which we don't support GSC. Therefore, if GSC failed to load previously there is no need to try again because the HW is stuck in the error state. An assert has been added so that if we ever add DGFX support we'll know we need to handle the D3 case. v2: use "< 0" instead of "!= 0" in the FW state error check (Julia). 
Fixes: dd0e89e5edc2 ("drm/xe/gsc: GSC FW load") Signed-off-by: Daniele Ceraolo Spurio Cc: John Harrison Cc: Alan Previn Cc: # v6.8+ Reviewed-by: Julia Filipchuk Link: https://patchwork.freedesktop.org/patch/msgid/20240828215158.2743994-2-daniele.ceraolospurio@intel.com (cherry picked from commit 2160f6f6e3cf6893a83357c3b82ff8589bdc0f08) Signed-off-by: Rodrigo Vivi Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/xe/xe_gsc.c | 12 ++++++++++++ drivers/gpu/drm/xe/xe_uc_fw.h | 9 +++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 60202b903687..95c17b72fa57 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -511,10 +511,22 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc) void xe_gsc_load_start(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); + struct xe_device *xe = gt_to_xe(gt); if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q) return; + /* + * The GSC HW is only reset by driver FLR or D3cold entry. We don't + * support the former at runtime, while the latter is only supported on + * DGFX, for which we don't support GSC. Therefore, if GSC failed to + * load previously there is no need to try again because the HW is + * stuck in the error state. + */ + xe_assert(xe, !IS_DGFX(xe)); + if (xe_uc_fw_is_in_error_state(&gsc->fw)) + return; + /* GSC FW survives GT reset and D3Hot */ if (gsc_fw_is_loaded(gt)) { xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED); diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h index 35078038797e..03951cb6de1c 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.h +++ b/drivers/gpu/drm/xe/xe_uc_fw.h @@ -65,7 +65,7 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status) return ""; } -static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status) +static inline int xe_uc_fw_status_to_error(const enum xe_uc_fw_status status) { switch (status) { case XE_UC_FIRMWARE_NOT_SUPPORTED: @@ -108,7 +108,7 @@ static inline const char *xe_uc_fw_type_repr(enum xe_uc_fw_type type) } static inline enum xe_uc_fw_status -__xe_uc_fw_status(struct xe_uc_fw *uc_fw) +__xe_uc_fw_status(const struct xe_uc_fw *uc_fw) { /* shouldn't call this before checking hw/blob availability */ XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED); @@ -156,6 +156,11 @@ static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw) return uc_fw->user_overridden; } +static inline bool xe_uc_fw_is_in_error_state(const struct xe_uc_fw *uc_fw) +{ + return xe_uc_fw_status_to_error(__xe_uc_fw_status(uc_fw)) < 0; +} + static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw) { if (xe_uc_fw_is_loaded(uc_fw)) From a6b268fce7bf5c6b8cb3942f315483c628a1635d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Larumbe?= Date: Mon, 2 Sep 2024 14:02:35 +0100 Subject: [PATCH 084/374] drm/panthor: flush FW AS caches in slow reset path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7de295d1a1a1b84e57b348e8bfd0fab5aab3ce69 upstream. In the off-chance that waiting for the firmware to signal its booted status timed out in the fast reset path, one must flush the cache lines for the entire FW VM address space before reloading the regions, otherwise stale values eventually lead to a scheduler job timeout. 
Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block") Cc: stable@vger.kernel.org Signed-off-by: Adrián Larumbe Acked-by: Liviu Dudau Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20240902130237.3440720-1-adrian.larumbe@collabora.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/panthor/panthor_fw.c | 8 +++++++- drivers/gpu/drm/panthor/panthor_mmu.c | 21 ++++++++++++++++++--- drivers/gpu/drm/panthor/panthor_mmu.h | 1 + 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 857f3f11258a..ef232c0c2049 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1089,6 +1089,12 @@ int panthor_fw_post_reset(struct panthor_device *ptdev) panthor_fw_stop(ptdev); ptdev->fw->fast_reset = false; drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset"); + + ret = panthor_vm_flush_all(ptdev->fw->vm); + if (ret) { + drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)"); + return ret; + } } /* Reload all sections, including RO ones. We're not supposed @@ -1099,7 +1105,7 @@ int panthor_fw_post_reset(struct panthor_device *ptdev) ret = panthor_fw_start(ptdev); if (ret) { - drm_err(&ptdev->base, "FW slow reset failed"); + drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW )"); return ret; } diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index fa0a002b1016..cc6e13a97783 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -576,6 +576,12 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, if (as_nr < 0) return 0; + /* + * If the AS number is greater than zero, then we can be sure + * the device is up and running, so we don't need to explicitly + * power it up + */ + if (op != AS_COMMAND_UNLOCK) lock_region(ptdev, as_nr, iova, size); @@ -874,14 +880,23 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) if (!drm_dev_enter(&ptdev->base, &cookie)) return 0; - /* Flush the PTs only if we're already awake */ - if (pm_runtime_active(ptdev->base.dev)) - ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT); + ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT); drm_dev_exit(cookie); return ret; } +/** + * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS + * @vm: VM whose cache to flush + * + * Return: 0 on success, a negative error code if flush failed. 
+ */ +int panthor_vm_flush_all(struct panthor_vm *vm) +{ + return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); +} + static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) { struct panthor_device *ptdev = vm->ptdev; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index f3c1ed19f973..6788771071e3 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -31,6 +31,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset); int panthor_vm_active(struct panthor_vm *vm); void panthor_vm_idle(struct panthor_vm *vm); int panthor_vm_as(struct panthor_vm *vm); +int panthor_vm_flush_all(struct panthor_vm *vm); struct panthor_heap_pool * panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create); From 33eb0344e186a2bcc257c6c5a6e65c1cb42adb4a Mon Sep 17 00:00:00 2001 From: Mary Guillemard Date: Tue, 3 Sep 2024 16:49:55 +0200 Subject: [PATCH 085/374] drm/panthor: Restrict high priorities on group_create commit 5f7762042f8a5377bd8a32844db353c0311a7369 upstream. We were allowing any users to create a high priority group without any permission checks. As a result, this was allowing possible denial of service. We now only allow the DRM master or users with the CAP_SYS_NICE capability to set higher priorities than PANTHOR_GROUP_PRIORITY_MEDIUM. As the sole user of that uAPI lives in Mesa and hardcode a value of MEDIUM [1], this should be safe to do. Additionally, as those checks are performed at the ioctl level, panthor_group_create now only check for priority level validity. [1]https://gitlab.freedesktop.org/mesa/mesa/-/blob/f390835074bdf162a63deb0311d1a6de527f9f89/src/gallium/drivers/panfrost/pan_csf.c#L1038 Signed-off-by: Mary Guillemard Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block") Cc: stable@vger.kernel.org Reviewed-by: Boris Brezillon Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20240903144955.144278-2-mary.guillemard@collabora.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/panthor/panthor_drv.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/panthor/panthor_sched.c | 2 +- include/uapi/drm/panthor_drm.h | 6 +++++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index b5e7b919f241..34182f67136c 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -996,6 +997,24 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data, return panthor_group_destroy(pfile, args->group_handle); } +static int group_priority_permit(struct drm_file *file, + u8 priority) +{ + /* Ensure that priority is valid */ + if (priority > PANTHOR_GROUP_PRIORITY_HIGH) + return -EINVAL; + + /* Medium priority and below are always allowed */ + if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM) + return 0; + + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ + if (capable(CAP_SYS_NICE) || drm_is_current_master(file)) + return 0; + + return -EACCES; +} + static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, struct drm_file *file) { @@ -1011,6 +1030,10 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, if (ret) return ret; + ret = group_priority_permit(file, args->priority); + if (ret) + return ret; + ret = panthor_group_create(pfile, args, queue_args); 
if (ret >= 0) { args->group_handle = ret; diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 463bcd3cf00f..12b272a912f8 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -3092,7 +3092,7 @@ int panthor_group_create(struct panthor_file *pfile, if (group_args->pad) return -EINVAL; - if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH) + if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT) return -EINVAL; if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index 926b1deb1116..e23a7f9b0eac 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -692,7 +692,11 @@ enum drm_panthor_group_priority { /** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */ PANTHOR_GROUP_PRIORITY_MEDIUM, - /** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */ + /** + * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. + * + * Requires CAP_SYS_NICE or DRM_MASTER. + */ PANTHOR_GROUP_PRIORITY_HIGH, }; From 1cc695be8920df234f83270d789078cb2d3bc564 Mon Sep 17 00:00:00 2001 From: Matt Coster Date: Mon, 2 Sep 2024 09:48:48 +0100 Subject: [PATCH 086/374] drm/imagination: Free pvr_vm_gpuva after unlink commit 3f6b2f60b4631cd0c368da6a1587ab55a696164d upstream. This caused a measurable memory leak. Although the individual allocations are small, the leaks occurs in a high-usage codepath (remapping or unmapping device memory) so they add up quickly. Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code") Cc: stable@vger.kernel.org Reviewed-by: Frank Binns Link: https://patchwork.freedesktop.org/patch/msgid/35867394-d8ce-4698-a8fd-919a018f1583@imgtec.com Signed-off-by: Matt Coster Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/imagination/pvr_vm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index e59517ba039e..97c0f772ed65 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -114,6 +114,8 @@ struct pvr_vm_gpuva { struct drm_gpuva base; }; +#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base) + enum pvr_vm_bind_type { PVR_VM_BIND_TYPE_MAP, PVR_VM_BIND_TYPE_UNMAP, @@ -386,6 +388,7 @@ pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx) drm_gpuva_unmap(&op->unmap); drm_gpuva_unlink(op->unmap.va); + kfree(to_pvr_vm_gpuva(op->unmap.va)); return 0; } @@ -433,6 +436,7 @@ pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx) } drm_gpuva_unlink(op->remap.unmap->va); + kfree(to_pvr_vm_gpuva(op->remap.unmap->va)); return 0; } From 675d6d34fc1c36a7cee0d10e06985fb1e6bc7746 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 26 Mar 2024 11:28:29 -0400 Subject: [PATCH 087/374] drm/amdgpu: always allocate cleared VRAM for GEM allocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4de34b04783628f14614badb0a1aa67ce3fcef5d upstream. This adds allocation latency, but aligns better with user expectations. The latency should improve with the drm buddy clearing patches that Arun has been working on. In addition this fixes the high CPU spikes seen when doing wipe on release. 
v2: always set AMDGPU_GEM_CREATE_VRAM_CLEARED (Christian) Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3528 Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality") Acked-by: Arunpravin Paneer Selvam Reviewed-by: Michel Dänzer (v1) Signed-off-by: Alex Deucher Cc: Arunpravin Paneer Selvam Cc: Christian König (cherry picked from commit 6c0a7c3c693ac84f8b50269a9088af8f37446863) Cc: stable@vger.kernel.org # 6.10.x Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 3adaa4670103..dc65c32bb1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -347,6 +347,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + /* always clear VRAM */ + flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; + /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { From d1e4d3df12e4ea31d73af90e72b34c21e311fe23 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 20 Aug 2024 14:59:52 -0700 Subject: [PATCH 088/374] drm/i915: Do not attempt to load the GSC multiple times commit 59d3cfdd7f9655a0400ac453bf92199204f8b2a1 upstream. If the GSC FW fails to load the GSC HW hangs permanently; the only ways to recover it are FLR or D3cold entry, with the former only being supported on driver unload and the latter only on DGFX, for which we don't need to load the GSC. Therefore, if GSC fails to load there is no need to try again because the HW is stuck in the error state and the submission to load the FW would just hang the GSCCS. Note that, due to wa_14015076503, on MTL the GuC escalates all GSCCS hangs to full GT resets, which would trigger a new attempt to load the GSC FW in the post-reset HW re-init; this issue is also fixed by not attempting to load the GSC FW after an error. 
Fixes: 15bd4a67e914 ("drm/i915/gsc: GSC firmware loading") Signed-off-by: Daniele Ceraolo Spurio Cc: Daniele Ceraolo Spurio Cc: Alan Previn Cc: John Harrison Cc: Rodrigo Vivi Cc: # v6.3+ Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240820215952.2290807-1-daniele.ceraolospurio@intel.com (cherry picked from commit 03ded4d432a1fb7bb6c44c5856d14115f6f6c3b9) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c index 453d855dd1de..3d3191deb0ab 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c @@ -302,7 +302,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc) { struct intel_gt *gt = gsc_uc_to_gt(gsc); - if (!intel_uc_fw_is_loadable(&gsc->fw)) + if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw)) return; if (intel_gsc_uc_fw_init_done(gsc)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 9a431726c8d5..ac7b3aad2222 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; } +static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw) +{ + return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0; +} + static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) { return uc_fw->user_overridden; From db492b5a322cc3cfad53402e4fb08dfc7452dad2 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Tue, 20 Aug 2024 14:34:15 -0400 Subject: [PATCH 089/374] drm/amd/display: Lock DC and exit IPS when changing backlight commit 53c3685f5307967a62517ace10e69d66520d0fc5 upstream. Backlight updates require aux and/or register access. Therefore, driver needs to disallow IPS beforehand. So, acquire the dc lock before calling into dc to update backlight - we should be doing this regardless of IPS. Then, while the lock is held, disallow IPS before calling into dc, then allow IPS afterwards (if it was previously allowed). 
Reviewed-by: Aurabindo Pillai Reviewed-by: Roman Li Signed-off-by: Leo Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher (cherry picked from commit 988fe2862635c1b1b40e41c85c24db44ab337c13) Cc: stable@vger.kernel.org # 6.10+ Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 382a41c5b515..0627961b7115 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4237,7 +4237,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, struct amdgpu_dm_backlight_caps caps; struct dc_link *link; u32 brightness; - bool rc; + bool rc, reallow_idle = false; amdgpu_dm_update_backlight_caps(dm, bl_idx); caps = dm->backlight_caps[bl_idx]; @@ -4250,6 +4250,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, link = (struct dc_link *)dm->backlight_link[bl_idx]; /* Change brightness based on AUX property */ + mutex_lock(&dm->dc_lock); + if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { + dc_allow_idle_optimizations(dm->dc, false); + reallow_idle = true; + } + if (caps.aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); @@ -4261,6 +4267,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } + if (dm->dc->caps.ips_support && reallow_idle) + dc_allow_idle_optimizations(dm->dc, true); + + mutex_unlock(&dm->dc_lock); + if (rc) dm->actual_brightness[bl_idx] = user_brightness; } From 1b55a0249ae2ce7421e4801e6ecaa4694a55c7c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Tue, 3 Sep 2024 14:49:31 +0200 Subject: [PATCH 090/374] ALSA: hda/realtek: extend quirks for Clevo V5[46]0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 562755501d44cfbbe82703a62cb41502bd067bd1 upstream. The mic in those laptops suffers too high gain resulting in mostly (fan or else) noise being recorded. In addition to the existing fixup about mic detection, apply also limiting its boost. While at it, extend the quirk to also V5[46]0TNE models, which have the same issue. 
Signed-off-by: Marek Marczykowski-Górecki Cc: Link: https://patch.msgid.link/20240903124939.6213-1-marmarek@invisiblethingslab.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1aeafbedb357..0cde024d1d33 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7526,6 +7526,7 @@ enum { ALC256_FIXUP_CHROME_BOOK, ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7, ALC287_FIXUP_LENOVO_SSID_17AA3820, + ALC245_FIXUP_CLEVO_NOISY_MIC, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -9857,6 +9858,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc287_fixup_lenovo_ssid_17aa3820, }, + [ALC245_FIXUP_CLEVO_NOISY_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -10496,7 +10503,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0xa741, "Clevo V54x_6x_TNE", ALC245_FIXUP_CLEVO_NOISY_MIC), + SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC), SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), From d4e65b669755cb74538d928fbdbf4f19e0ab189a Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 16 Jun 2024 09:34:44 +0200 Subject: [PATCH 091/374] ALSA: control: Apply sanity check of input values for user elements [ Upstream commit 50ed081284fe2bfd1f25e8b92f4f6a4990e73c0a ] Although we have already a mechanism for sanity checks of input values for control writes, it's not applied unless the kconfig CONFIG_SND_CTL_INPUT_VALIDATION is set due to the performance reason. Nevertheless, it still makes sense to apply the same check for user elements despite of its cost, as that's the only way to filter out the invalid values; the user controls are handled solely in ALSA core code, and there is no corresponding driver, after all. This patch adds the same input value validation for user control elements at its put callback. The kselftest will be happier with this change, as the incorrect values will be bailed out now with errors. For other normal controls, the check is applied still only when CONFIG_SND_CTL_INPUT_VALIDATION is set. 
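As a hedged userspace sketch of the user-visible effect (the element name is made up; any user-defined control with an integer range would do, e.g. one created via snd_ctl_add_integer_elem_set()): writing an out-of-range value to a user element is now expected to be rejected with -EINVAL by the put callback instead of being stored verbatim.

  /* Illustrative only; build with: cc demo.c -lasound */
  #include <alsa/asoundlib.h>
  #include <stdio.h>

  int main(void)
  {
          snd_ctl_t *ctl;
          snd_ctl_elem_value_t *val;
          int err;

          if (snd_ctl_open(&ctl, "hw:0", 0) < 0)
                  return 1;

          snd_ctl_elem_value_alloca(&val);
          snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_MIXER);
          snd_ctl_elem_value_set_name(val, "My User Volume"); /* hypothetical user element */
          snd_ctl_elem_value_set_integer(val, 0, 100000);     /* far beyond its declared range */

          err = snd_ctl_elem_write(ctl, val);
          printf("write returned %d (%s)\n", err, snd_strerror(err));
          /* with this change the write should fail with -EINVAL */

          snd_ctl_close(ctl);
          return 0;
  }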
Reported-by: Paul Menzel Closes: https://lore.kernel.org/r/1d44be36-9bb9-4d82-8953-5ae2a4f09405@molgen.mpg.de Reviewed-by: Jaroslav Kysela Reviewed-by: Mark Brown Reviewed-by: Takashi Sakamoto Signed-off-by: Takashi Iwai Link: https://lore.kernel.org/20240616073454.16512-4-tiwai@suse.de Signed-off-by: Sasha Levin --- sound/core/control.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sound/core/control.c b/sound/core/control.c index fb0c60044f7b..1dd2337e2930 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1480,12 +1480,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol, static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { - int change; + int err, change; struct user_element *ue = kcontrol->private_data; unsigned int size = ue->elem_data_size; char *dst = ue->elem_data + snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size; + err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false); + if (err < 0) + return err; + change = memcmp(&ucontrol->value, dst, size) != 0; if (change) memcpy(dst, &ucontrol->value, size); From b0d13e92d92780324c5d445db7846efed140d804 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 16 Jun 2024 09:34:47 +0200 Subject: [PATCH 092/374] ALSA: hda: Add input value sanity checks to HDMI channel map controls [ Upstream commit 6278056e42d953e207e2afd416be39d09ed2d496 ] Add a simple sanity check to HD-audio HDMI Channel Map controls. Although the value might not be accepted for the actual connection, we can filter out some bogus values beforehand, and that should be enough for making kselftest happier. Reviewed-by: Jaroslav Kysela Signed-off-by: Takashi Iwai Link: https://lore.kernel.org/20240616073454.16512-7-tiwai@suse.de Signed-off-by: Sasha Levin --- sound/hda/hdmi_chmap.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c index 5d8e1d944b0a..7b276047f85a 100644 --- a/sound/hda/hdmi_chmap.c +++ b/sound/hda/hdmi_chmap.c @@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol, return 0; } +/* a simple sanity check for input values to chmap kcontrol */ +static int chmap_value_check(struct hdac_chmap *hchmap, + const struct snd_ctl_elem_value *ucontrol) +{ + int i; + + for (i = 0; i < hchmap->channels_max; i++) { + if (ucontrol->value.integer.value[i] < 0 || + ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST) + return -EINVAL; + } + return 0; +} + static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol, unsigned char chmap[8], per_pin_chmap[8]; int i, err, ca, prepared = 0; + err = chmap_value_check(hchmap, ucontrol); + if (err < 0) + return err; + /* No monitor is connected in dyn_pcm_assign. * It's invalid to setup the chmap */ From 3e0a295002822e35c2b45d0a139ceb70d5a5d0a0 Mon Sep 17 00:00:00 2001 From: Aaradhana Sahu Date: Tue, 11 Jun 2024 08:40:17 +0530 Subject: [PATCH 093/374] wifi: ath12k: fix uninitialize symbol error on ath12k_peer_assoc_h_he() [ Upstream commit 19b77e7c656a3e125319cc3ef347b397cf042bf6 ] Smatch throws following errors drivers/net/wireless/ath/ath12k/mac.c:1922 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_80'. drivers/net/wireless/ath/ath12k/mac.c:1922 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_160'. 
drivers/net/wireless/ath/ath12k/mac.c:1924 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_80'. In ath12k_peer_assoc_h_he() rx_mcs_80 and rx_mcs_160 variables remain uninitialized in the following conditions: 1. Whenever the value of mcs_80 become equal to IEEE80211_HE_MCS_NOT_SUPPORTED then rx_mcs_80 remains uninitialized. 2. Whenever phy capability is not supported 160 channel width and value of mcs_160 become equal to IEEE80211_HE_MCS_NOT_SUPPORTED then rx_mcs_160 remains uninitialized. Initialize these variables during declaration. Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00188-QCAHKSWPL_SILICONZ-1 Signed-off-by: Aaradhana Sahu Acked-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240611031017.297927-3-quic_aarasahu@quicinc.com Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath12k/mac.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 8474e25d2ac6..71b4ec7717d5 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -1881,7 +1881,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, { const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; int i; - u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; + u8 ampdu_factor, max_nss; + u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; + u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; u16 mcs_160_map, mcs_80_map; bool support_160; u16 v; From 25a15f80253a7c8776e4e4880d797d20ec864154 Mon Sep 17 00:00:00 2001 From: Ajith C Date: Thu, 13 Jun 2024 11:05:28 +0530 Subject: [PATCH 094/374] wifi: ath12k: fix firmware crash due to invalid peer nss [ Upstream commit db163a463bb93cd3e37e1e7b10b9726fb6f95857 ] Currently, if the access point receives an association request containing an Extended HE Capabilities Information Element with an invalid MCS-NSS, it triggers a firmware crash. This issue arises when EHT-PHY capabilities shows support for a bandwidth and MCS-NSS set for that particular bandwidth is filled by zeros and due to this, driver obtains peer_nss as 0 and sending this value to firmware causes crash. Address this issue by implementing a validation step for the peer_nss value before passing it to the firmware. If the value is greater than zero, proceed with forwarding it to the firmware. However, if the value is invalid, reject the association request to prevent potential firmware crashes. 
Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.0.1-00029-QCAHKSWPL_SILICONZ-1 Signed-off-by: Ajith C Acked-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240613053528.2541645-1-quic_ajithc@quicinc.com Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath12k/mac.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 71b4ec7717d5..7037004ce977 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -3847,6 +3847,11 @@ static int ath12k_station_assoc(struct ath12k *ar, ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + if (peer_arg.peer_nss < 1) { + ath12k_warn(ar->ab, + "invalid peer NSS %d\n", peer_arg.peer_nss); + return -EINVAL; + } ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", From 73656d1f4a74857f4f65942c2ad0cadff264df76 Mon Sep 17 00:00:00 2001 From: Konstantin Andreev Date: Mon, 17 Jun 2024 01:44:30 +0300 Subject: [PATCH 095/374] smack: unix sockets: fix accept()ed socket label [ Upstream commit e86cac0acdb1a74f608bacefe702f2034133a047 ] When a process accept()s connection from a unix socket (either stream or seqpacket) it gets the socket with the label of the connecting process. For example, if a connecting process has a label 'foo', the accept()ed socket will also have 'in' and 'out' labels 'foo', regardless of the label of the listener process. This is because kernel creates unix child sockets in the context of the connecting process. I do not see any obvious way for the listener to abuse alien labels coming with the new socket, but, to be on the safe side, it's better fix new socket labels. Signed-off-by: Konstantin Andreev Signed-off-by: Casey Schaufler Signed-off-by: Sasha Levin --- security/smack/smack_lsm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index ab939e6449e4..002a1b9ed83a 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -3871,12 +3871,18 @@ static int smack_unix_stream_connect(struct sock *sock, } } - /* - * Cross reference the peer labels for SO_PEERSEC. - */ if (rc == 0) { + /* + * Cross reference the peer labels for SO_PEERSEC. + */ nsp->smk_packet = ssp->smk_out; ssp->smk_packet = osp->smk_out; + + /* + * new/child/established socket must inherit listening socket labels + */ + nsp->smk_out = osp->smk_out; + nsp->smk_in = osp->smk_in; } return rc; From 4e2b49a85e7974d21364798c5d4aa8070aa864d9 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Thu, 6 Jun 2024 21:23:39 -0600 Subject: [PATCH 096/374] drm/amd/display: Check UnboundedRequestEnabled's value [ Upstream commit a7b38c7852093385d0605aa3c8a2efd6edd1edfd ] CalculateSwathAndDETConfiguration_params_st's UnboundedRequestEnabled is a pointer (i.e. dml_bool_t *UnboundedRequestEnabled), and thus if (p->UnboundedRequestEnabled) checks its address, not bool value. This fixes 1 REVERSE_INULL issue reported by Coverity. 
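A tiny standalone illustration of the bug class (names shortened, not the DML code itself): testing the pointer makes the branch depend on the address being non-NULL rather than on the configured value.

  #include <stdbool.h>
  #include <stdio.h>

  static void configure(const bool *unbounded_request_enabled)
  {
          if (unbounded_request_enabled)          /* bug: checks the address, always true here */
                  printf("taken even though the flag is false\n");

          if (*unbounded_request_enabled)         /* fix: checks the value */
                  printf("taken only when the flag is actually set\n");
  }

  int main(void)
  {
          bool flag = false;

          configure(&flag);
          return 0;
  }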
Reviewed-by: Harry Wentland Acked-by: Hamza Mahfooz Signed-off-by: Alex Hung Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 3e919f5c00ca..fee1df342f12 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -4282,7 +4282,7 @@ static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st } *p->compbuf_reserved_space_64b = 2 * p->PixelChunkSizeInKByte * 1024 / 64; - if (p->UnboundedRequestEnabled) { + if (*p->UnboundedRequestEnabled) { *p->compbuf_reserved_space_64b = dml_max(*p->compbuf_reserved_space_64b, (dml_float_t)(p->ROBBufferSizeInKByte * 1024/64) - (dml_float_t)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / MAXIMUMCOMPRESSION/64)); From 941358a2c56e7d3184739f0cb7b1ec31d264bc66 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 17 Jun 2024 10:39:43 -0400 Subject: [PATCH 097/374] cgroup/cpuset: Delay setting of CS_CPU_EXCLUSIVE until valid partition [ Upstream commit fe8cd2736e75c8ca3aed1ef181a834e41dc5310f ] The CS_CPU_EXCLUSIVE flag is currently set whenever cpuset.cpus.exclusive is set to make sure that the exclusivity test will be run to ensure its exclusiveness. At the same time, this flag can be changed whenever the partition root state is changed. For example, the CS_CPU_EXCLUSIVE flag will be reset whenever a partition root becomes invalid. This makes using CS_CPU_EXCLUSIVE to ensure exclusiveness a bit fragile. The current scheme also makes setting up a cpuset.cpus.exclusive hierarchy to enable remote partition harder as cpuset.cpus.exclusive cannot overlap with any cpuset.cpus of sibling cpusets if their cpuset.cpus.exclusive aren't set. Solve these issues by deferring the setting of CS_CPU_EXCLUSIVE flag until the cpuset become a valid partition root while adding new checks in validate_change() to ensure that cpuset.cpus.exclusive of sibling cpusets cannot overlap. An additional check is also added to validate_change() to make sure that cpuset.cpus of one cpuset cannot be a subset of cpuset.cpus.exclusive of a sibling cpuset to avoid the problem that none of those CPUs will be available when these exclusive CPUs are extracted out to a newly enabled partition root. The Documentation/admin-guide/cgroup-v2.rst file is updated to document the new constraints. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin --- Documentation/admin-guide/cgroup-v2.rst | 8 ++++-- kernel/cgroup/cpuset.c | 36 ++++++++++++++++++++----- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 8fbb0519d556..b69f701b2485 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -2346,8 +2346,12 @@ Cpuset Interface Files is always a subset of it. Users can manually set it to a value that is different from - "cpuset.cpus". The only constraint in setting it is that the - list of CPUs must be exclusive with respect to its sibling. + "cpuset.cpus". One constraint in setting it is that the list of + CPUs must be exclusive with respect to "cpuset.cpus.exclusive" + of its sibling. 
If "cpuset.cpus.exclusive" of a sibling cgroup + isn't set, its "cpuset.cpus" value, if set, cannot be a subset + of it to leave at least one CPU available when the exclusive + CPUs are taken away. For a parent cgroup, any one of its exclusive CPUs can only be distributed to at most one of its child cgroups. Having an diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index fc1c6236460d..e8f24483e05f 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -826,17 +826,41 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) /* * If either I or some sibling (!= me) is exclusive, we can't - * overlap + * overlap. exclusive_cpus cannot overlap with each other if set. */ ret = -EINVAL; cpuset_for_each_child(c, css, par) { - if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && - c != cur) { + bool txset, cxset; /* Are exclusive_cpus set? */ + + if (c == cur) + continue; + + txset = !cpumask_empty(trial->exclusive_cpus); + cxset = !cpumask_empty(c->exclusive_cpus); + if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) || + (txset && cxset)) { if (!cpusets_are_exclusive(trial, c)) goto out; + } else if (txset || cxset) { + struct cpumask *xcpus, *acpus; + + /* + * When just one of the exclusive_cpus's is set, + * cpus_allowed of the other cpuset, if set, cannot be + * a subset of it or none of those CPUs will be + * available if these exclusive CPUs are activated. + */ + if (txset) { + xcpus = trial->exclusive_cpus; + acpus = c->cpus_allowed; + } else { + xcpus = c->exclusive_cpus; + acpus = trial->cpus_allowed; + } + if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus)) + goto out; } if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && - c != cur && nodes_intersects(trial->mems_allowed, c->mems_allowed)) goto out; } @@ -1376,7 +1400,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, */ static int update_partition_exclusive(struct cpuset *cs, int new_prs) { - bool exclusive = (new_prs > 0); + bool exclusive = (new_prs > PRS_MEMBER); if (exclusive && !is_cpu_exclusive(cs)) { if (update_flag(CS_CPU_EXCLUSIVE, cs, 1)) @@ -2624,8 +2648,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, retval = cpulist_parse(buf, trialcs->exclusive_cpus); if (retval < 0) return retval; - if (!is_cpu_exclusive(cs)) - set_bit(CS_CPU_EXCLUSIVE, &trialcs->flags); } /* Nothing to do if the CPUs didn't change */ From 2768720348889ff24954b8e79f75f0296b850799 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 29 Mar 2024 22:54:41 +0100 Subject: [PATCH 098/374] virt: sev-guest: Mark driver struct with __refdata to prevent section mismatch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 3991b04d4870fd334b77b859a8642ca7fb592603 ] As described in the added code comment, a reference to .exit.text is ok for drivers registered via module_platform_driver_probe(). Make this explicit to prevent the following section mismatch warning: WARNING: modpost: drivers/virt/coco/sev-guest/sev-guest: section mismatch in reference: \ sev_guest_driver+0x10 (section: .data) -> sev_guest_remove (section: .exit.text) that triggers on an allmodconfig W=1 build. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/r/4a81b0e87728a58904283e2d1f18f73abc69c2a1.1711748999.git.u.kleine-koenig@pengutronix.de Signed-off-by: Sasha Levin --- drivers/virt/coco/sev-guest/sev-guest.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 654290a8e1ba..a100d6241992 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -1009,8 +1009,13 @@ static void __exit sev_guest_remove(struct platform_device *pdev) * This driver is meant to be a common SEV guest interface driver and to * support any SEV guest API. As such, even though it has been introduced * with the SEV-SNP support, it is named "sev-guest". + * + * sev_guest_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound + * at runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. */ -static struct platform_driver sev_guest_driver = { +static struct platform_driver sev_guest_driver __refdata = { .remove_new = __exit_p(sev_guest_remove), .driver = { .name = "sev-guest", From af246e400789b82beafe610fe5de5d87067e24a6 Mon Sep 17 00:00:00 2001 From: Leon Hwang Date: Mon, 10 Jun 2024 20:42:23 +0800 Subject: [PATCH 099/374] bpf, verifier: Correct tail_call_reachable for bpf prog [ Upstream commit 01793ed86b5d7df1e956520b5474940743eb7ed8 ] It's confusing to inspect 'prog->aux->tail_call_reachable' with drgn[0], when bpf prog has tail call but 'tail_call_reachable' is false. This patch corrects 'tail_call_reachable' when bpf prog has tail call. Signed-off-by: Leon Hwang Link: https://lore.kernel.org/r/20240610124224.34673-2-hffilwlqm@gmail.com Signed-off-by: Alexei Starovoitov Signed-off-by: Sasha Levin --- kernel/bpf/verifier.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 521bd7efae03..73f55f4b945e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2982,8 +2982,10 @@ static int check_subprogs(struct bpf_verifier_env *env) if (code == (BPF_JMP | BPF_CALL) && insn[i].src_reg == 0 && - insn[i].imm == BPF_FUNC_tail_call) + insn[i].imm == BPF_FUNC_tail_call) { subprog[cur_subprog].has_tail_call = true; + subprog[cur_subprog].tail_call_reachable = true; + } if (BPF_CLASS(code) == BPF_LD && (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) subprog[cur_subprog].has_ld_abs = true; From 1cf8cd80903073440b6ea055811d04edd24fe4f7 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 21 Jun 2024 21:54:50 +0300 Subject: [PATCH 100/374] ELF: fix kernel.randomize_va_space double read [ Upstream commit 2a97388a807b6ab5538aa8f8537b2463c6988bd2 ] ELF loader uses "randomize_va_space" twice. It is sysctl and can change at any moment, so 2 loads could see 2 different values in theory with unpredictable consequences. Issue exactly one load for consistent value across one exec. 
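For illustration only (not part of the patch): a minimal userspace sketch of the snapshot-once pattern, with a made-up atomic "tunable" standing in for the sysctl. Reading the value a single time guarantees all later decisions agree even if another thread changes it concurrently.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int tunable = 2;         /* stand-in for the sysctl */

static void setup(void)
{
        /* Read the tunable once; every later decision uses the snapshot,
         * so the two checks below can never see different values. */
        const int snapshot = atomic_load(&tunable);

        if (snapshot)
                printf("randomization enabled\n");
        if (snapshot > 1)
                printf("extra randomization enabled\n");
}

int main(void)
{
        setup();
        return 0;
}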
Signed-off-by: Alexey Dobriyan Link: https://lore.kernel.org/r/3329905c-7eb8-400a-8f0a-d87cff979b5b@p183 Signed-off-by: Kees Cook Signed-off-by: Sasha Levin --- fs/binfmt_elf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a43897b03ce9..777405719de8 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1003,7 +1003,8 @@ static int load_elf_binary(struct linux_binprm *bprm) if (elf_read_implies_exec(*elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space); + if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space) current->flags |= PF_RANDOMIZE; setup_new_exec(bprm); @@ -1251,7 +1252,7 @@ static int load_elf_binary(struct linux_binprm *bprm) mm->end_data = end_data; mm->start_stack = bprm->p; - if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { + if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) { /* * For architectures with ELF randomization, when executing * a loader directly (i.e. no interpreter listed in ELF From b69449b6c174019fa946c3667a874ab611be10d7 Mon Sep 17 00:00:00 2001 From: Rakesh Ughreja Date: Tue, 26 Mar 2024 08:01:02 +0200 Subject: [PATCH 101/374] accel/habanalabs/gaudi2: unsecure edma max outstanding register [ Upstream commit 3309887c6ff8ca2ac05a74e1ee5d1c44829f63f2 ] Netowrk EDMAs uses more outstanding transfers so this needs to be programmed by EDMA firmware. Signed-off-by: Rakesh Ughreja Reviewed-by: Ofir Bitton Signed-off-by: Ofir Bitton Signed-off-by: Sasha Levin --- drivers/accel/habanalabs/gaudi2/gaudi2_security.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c index 34bf80c5a44b..307ccb912ccd 100644 --- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c @@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = { mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS, mmDCORE0_EDMA0_CORE_CTX_IDX, mmDCORE0_EDMA0_CORE_CTX_IDX_INC, + mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND, mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG, mmDCORE0_EDMA0_QM_CQ_CFG0_0, mmDCORE0_EDMA0_QM_CQ_CFG0_1, From 01e43f1f651c46fd211332c104ad772dbd0701ae Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Thu, 6 Jun 2024 20:48:13 +0100 Subject: [PATCH 102/374] irqchip/renesas-rzg2l: Reorder function calls in rzg2l_irqc_irq_disable() [ Upstream commit 492eee82574b163fbb3f099c74ce3b4322d0af28 ] The order of function calls in the disable operation should be the reverse of that in the enable operation. Thus, reorder the function calls to first disable the parent IRQ chip before disabling the TINT IRQ. 
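For illustration only (not part of the patch): a minimal sketch of teardown mirroring setup in reverse order. The two helpers are just labels standing in for the TINT and parent-chip operations.

#include <stdio.h>

static void tint_endisable(int enable)
{
        printf("tint %s\n", enable ? "enabled" : "disabled");
}

static void parent_endisable(int enable)
{
        printf("parent %s\n", enable ? "enabled" : "disabled");
}

static void irq_enable(void)
{
        tint_endisable(1);
        parent_endisable(1);
}

static void irq_disable(void)
{
        /* Reverse of irq_enable(): undo the parent first, then the TINT. */
        parent_endisable(0);
        tint_endisable(0);
}

int main(void)
{
        irq_enable();
        irq_disable();
        return 0;
}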
Reported-by: Geert Uytterhoeven Signed-off-by: Lad Prabhakar Signed-off-by: Thomas Gleixner Tested-by: Claudiu Beznea # on RZ/G3S Link: https://lore.kernel.org/r/20240606194813.676823-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Sasha Levin --- drivers/irqchip/irq-renesas-rzg2l.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index f6484bf15e0b..5a4521cf3ec6 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -162,8 +162,8 @@ static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable) static void rzg2l_irqc_irq_disable(struct irq_data *d) { - rzg2l_tint_irq_endisable(d, false); irq_chip_disable_parent(d); + rzg2l_tint_irq_endisable(d, false); } static void rzg2l_irqc_irq_enable(struct irq_data *d) From 34d1122674b8b2da447e0894cc52658f12d48761 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Fri, 21 Jun 2024 11:38:28 +0200 Subject: [PATCH 103/374] irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 3cef738208e5c3cb7084e208caf9bbf684f24feb ] IRQs 0 (IPI) and 1 (MSI) are handled internally by this driver, generic_handle_domain_irq() is never called for these IRQs. Disallow mapping these IRQs. [ Marek: changed commit message ] Signed-off-by: Pali Rohár Signed-off-by: Marek Behún Signed-off-by: Thomas Gleixner Reviewed-by: Andrew Lunn Signed-off-by: Sasha Levin --- drivers/irqchip/irq-armada-370-xp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 4b021a67bdfe..f488c35d9130 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -566,6 +566,10 @@ static struct irq_chip armada_370_xp_irq_chip = { static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { + /* IRQs 0 and 1 cannot be mapped, they are handled internally */ + if (hw <= 1) + return -EINVAL; + armada_370_xp_irq_mask(irq_get_irq_data(virq)); if (!is_percpu_irq(hw)) writel(hw, per_cpu_int_base + From 845af9c8ce034dbd8067f8c74d6a4ce273829528 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Thu, 20 Jun 2024 09:52:26 +0200 Subject: [PATCH 104/374] media: b2c2: flexcop-usb: fix flexcop_usb_memory_req [ Upstream commit b178aa6f333b07bda0548d7e45085660a112414d ] smatch generated this warning: drivers/media/usb/b2c2/flexcop-usb.c:199 flexcop_usb_memory_req() warn: iterator 'i' not incremented and indeed the function is not using i or updating buf. The reason this always worked is that this function is called to write just 6 bytes (a MAC address) to the USB device, and so in practice there is only a single chunk written. If we ever would need to write more than one chunk, this function would fail since each chunk would read from or write to the same buf address. Rewrite the function to properly handle this. 
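For illustration only (not part of the patch): a minimal userspace sketch of the chunked-copy pattern the rewritten loop follows, advancing the buffer pointer and remaining length on every chunk. The helper name and chunk size are invented.

#include <stdio.h>
#include <string.h>

/* Copy 'len' bytes in chunks of at most 'max_chunk', advancing both
 * pointers and the remaining length each iteration. */
static void copy_in_chunks(unsigned char *dst, const unsigned char *src,
                           size_t len, size_t max_chunk)
{
        while (len) {
                size_t chunk = len < max_chunk ? len : max_chunk;

                memcpy(dst, src, chunk);
                dst += chunk;
                src += chunk;
                len -= chunk;
        }
}

int main(void)
{
        unsigned char in[100], out[100];

        memset(in, 0xab, sizeof(in));
        copy_in_chunks(out, in, sizeof(in), 6);
        printf("copies match: %d\n", memcmp(in, out, sizeof(in)) == 0);
        return 0;
}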
Signed-off-by: Hans Verkuil Signed-off-by: Sasha Levin --- drivers/media/usb/b2c2/flexcop-usb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 90f1aea99dac..8033622543f2 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -179,7 +179,7 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb, flexcop_usb_request_t req, flexcop_usb_mem_page_t page_start, u32 addr, int extended, u8 *buf, u32 len) { - int i, ret = 0; + int ret = 0; u16 wMax; u32 pagechunk = 0; @@ -196,7 +196,7 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb, default: return -EINVAL; } - for (i = 0; i < len;) { + while (len) { pagechunk = min(wMax, bytes_left_to_read_on_page(addr, len)); deb_info("%x\n", (addr & V8_MEMORY_PAGE_MASK) | @@ -206,11 +206,12 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb, page_start + (addr / V8_MEMORY_PAGE_SIZE), (addr & V8_MEMORY_PAGE_MASK) | (V8_MEMORY_EXTENDED*extended), - &buf[i], pagechunk); + buf, pagechunk); if (ret < 0) return ret; addr += pagechunk; + buf += pagechunk; len -= pagechunk; } return 0; From c8cb076f865fc780bdfbdde4fd6802bf07e6923c Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Thu, 20 Jun 2024 13:56:22 -0700 Subject: [PATCH 105/374] af_unix: Remove put_pid()/put_cred() in copy_peercred(). [ Upstream commit e4bd881d987121dbf1a288641491955a53d9f8f7 ] When (AF_UNIX, SOCK_STREAM) socket connect()s to a listening socket, the listener's sk_peer_pid/sk_peer_cred are copied to the client in copy_peercred(). Then, the client's sk_peer_pid and sk_peer_cred are always NULL, so we need not call put_pid() and put_cred() there. Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- net/unix/af_unix.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index be5266007b48..84a332f95aa8 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -692,9 +692,6 @@ static void init_peercred(struct sock *sk) static void copy_peercred(struct sock *sk, struct sock *peersk) { - const struct cred *old_cred; - struct pid *old_pid; - if (sk < peersk) { spin_lock(&sk->sk_peer_lock); spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); @@ -702,16 +699,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk) spin_lock(&peersk->sk_peer_lock); spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); } - old_pid = sk->sk_peer_pid; - old_cred = sk->sk_peer_cred; + sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); spin_unlock(&peersk->sk_peer_lock); - - put_pid(old_pid); - put_cred(old_cred); } static int unix_listen(struct socket *sock, int backlog) From 86e99aab7e18377590a9d0135b5aa7f792272284 Mon Sep 17 00:00:00 2001 From: Brian Johannesmeyer Date: Thu, 23 May 2024 23:50:29 +0200 Subject: [PATCH 106/374] x86/kmsan: Fix hook for unaligned accesses [ Upstream commit bf6ab33d8487f5e2a0998ce75286eae65bb0a6d6 ] When called with a 'from' that is not 4-byte-aligned, string_memcpy_fromio() calls the movs() macro to copy the first few bytes, so that 'from' becomes 4-byte-aligned before calling rep_movs(). This movs() macro modifies 'to', and the subsequent line modifies 'n'. As a result, on unaligned accesses, kmsan_unpoison_memory() uses the updated (aligned) values of 'to' and 'n'. 
Hence, it does not unpoison the entire region. Save the original values of 'to' and 'n', and pass those to kmsan_unpoison_memory(), so that the entire region is unpoisoned. Signed-off-by: Brian Johannesmeyer Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Alexander Potapenko Link: https://lore.kernel.org/r/20240523215029.4160518-1-bjohannesmeyer@gmail.com Signed-off-by: Sasha Levin --- arch/x86/lib/iomem.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c index e0411a3774d4..5eecb45d05d5 100644 --- a/arch/x86/lib/iomem.c +++ b/arch/x86/lib/iomem.c @@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n) static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) { + const void *orig_to = to; + const size_t orig_n = n; + if (unlikely(!n)) return; @@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si } rep_movs(to, (const void *)from, n); /* KMSAN must treat values read from devices as initialized. */ - kmsan_unpoison_memory(to, n); + kmsan_unpoison_memory(orig_to, orig_n); } static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) From 01579894637be7bcd50555bdd86d5428787a85dc Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Sun, 16 Jun 2024 23:40:52 +0100 Subject: [PATCH 107/374] iommu: sun50i: clear bypass register [ Upstream commit 927c70c93d929f4c2dcaf72f51b31bb7d118a51a ] The Allwinner H6 IOMMU has a bypass register, which allows to circumvent the page tables for each possible master. The reset value for this register is 0, which disables the bypass. The Allwinner H616 IOMMU resets this register to 0x7f, which activates the bypass for all masters, which is not what we want. Always clear this register to 0, to enforce the usage of page tables, and make this driver compatible with the H616 in this respect. Signed-off-by: Jernej Skrabec Signed-off-by: Andre Przywara Reviewed-by: Chen-Yu Tsai Link: https://lore.kernel.org/r/20240616224056.29159-2-andre.przywara@arm.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/sun50i-iommu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index c519b991749d..dd3f07384624 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -452,6 +452,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu) IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) | IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) | IOMMU_TLB_PREFETCH_MASTER_ENABLE(5)); + iommu_write(iommu, IOMMU_BYPASS_REG, 0); iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK); iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE), IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) | From 39c3b595f3efccab314a9c721c708fde209a94d8 Mon Sep 17 00:00:00 2001 From: Yunjian Wang Date: Fri, 31 May 2024 11:48:47 +0800 Subject: [PATCH 108/374] netfilter: nf_conncount: fix wrong variable type [ Upstream commit 0b88d1654d556264bcd24a9cb6383f0888e30131 ] Now there is a issue is that code checks reports a warning: implicit narrowing conversion from type 'unsigned int' to small type 'u8' (the 'keylen' variable). Fix it by removing the 'keylen' variable. 
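For illustration only (not part of the patch): a tiny sketch of the implicit narrowing the conncount change avoids by using the original unsigned int field directly instead of a u8-sized local copy.

#include <stdio.h>

int main(void)
{
        unsigned int keylen = 300;
        unsigned char narrowed = keylen;        /* implicitly truncated to 44 */

        printf("original %u, narrowed copy %u\n", keylen, narrowed);
        return 0;
}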
Signed-off-by: Yunjian Wang Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- net/netfilter/nf_conncount.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 8715617b02fe..34ba14e59e95 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -321,7 +321,6 @@ insert_tree(struct net *net, struct nf_conncount_rb *rbconn; struct nf_conncount_tuple *conn; unsigned int count = 0, gc_count = 0; - u8 keylen = data->keylen; bool do_gc = true; spin_lock_bh(&nf_conncount_locks[hash]); @@ -333,7 +332,7 @@ insert_tree(struct net *net, rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); parent = *rbnode; - diff = key_diff(key, rbconn->key, keylen); + diff = key_diff(key, rbconn->key, data->keylen); if (diff < 0) { rbnode = &((*rbnode)->rb_left); } else if (diff > 0) { @@ -378,7 +377,7 @@ insert_tree(struct net *net, conn->tuple = *tuple; conn->zone = *zone; - memcpy(rbconn->key, key, sizeof(u32) * keylen); + memcpy(rbconn->key, key, sizeof(u32) * data->keylen); nf_conncount_list_init(&rbconn->list); list_add(&conn->node, &rbconn->list.head); @@ -403,7 +402,6 @@ count_tree(struct net *net, struct rb_node *parent; struct nf_conncount_rb *rbconn; unsigned int hash; - u8 keylen = data->keylen; hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; root = &data->root[hash]; @@ -414,7 +412,7 @@ count_tree(struct net *net, rbconn = rb_entry(parent, struct nf_conncount_rb, node); - diff = key_diff(key, rbconn->key, keylen); + diff = key_diff(key, rbconn->key, data->keylen); if (diff < 0) { parent = rcu_dereference_raw(parent->rb_left); } else if (diff > 0) { From 94dc8dc1c038abccc323afcacbae1fa9a3e95666 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Tue, 25 Jun 2024 00:12:27 +0000 Subject: [PATCH 109/374] gve: Add adminq mutex lock [ Upstream commit 1108566ca509e67aa8abfbf914b1cd31e9ff51f8 ] We were depending on the rtnl_lock to make sure there is only one adminq command running at a time. But some commands may take too long to hold the rtnl_lock, such as the upcoming flow steering operations. For such situations, it can temporarily drop the rtnl_lock, and replace it for these operations with a new adminq lock, which can ensure the adminq command execution to be thread-safe. 
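For illustration only (not part of the patch): a rough userspace analogue of serializing command execution with a dedicated lock rather than relying on an unrelated, broader lock being held by callers. Names are made up.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

static int execute_cmd(int cmd)
{
        int err = 0;

        pthread_mutex_lock(&cmd_lock);
        /* issue the command and wait for completion here */
        printf("executed command %d\n", cmd);
        pthread_mutex_unlock(&cmd_lock);
        return err;
}

int main(void)
{
        return execute_cmd(1);
}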
Signed-off-by: Ziwei Xiao Reviewed-by: Praveen Kaligineedi Reviewed-by: Harshitha Ramamurthy Reviewed-by: Willem de Bruijn Link: https://patch.msgid.link/20240625001232.1476315-2-ziweixiao@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/google/gve/gve.h | 1 + drivers/net/ethernet/google/gve/gve_adminq.c | 22 +++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index ae1e21c9b0a5..ca7fce17f2c0 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -724,6 +724,7 @@ struct gve_priv { union gve_adminq_command *adminq; dma_addr_t adminq_bus_addr; struct dma_pool *adminq_pool; + struct mutex adminq_lock; /* Protects adminq command execution */ u32 adminq_mask; /* masks prod_cnt to adminq size */ u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 8ca0def176ef..2e0c1eb87b11 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -284,6 +284,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) &priv->reg_bar0->adminq_base_address_lo); iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status); } + mutex_init(&priv->adminq_lock); gve_set_admin_queue_ok(priv); return 0; } @@ -511,28 +512,29 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, return 0; } -/* This function is not threadsafe - the caller is responsible for any - * necessary locks. - * The caller is also responsible for making sure there are no commands - * waiting to be executed. - */ static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig) { u32 tail, head; int err; + mutex_lock(&priv->adminq_lock); tail = ioread32be(&priv->reg_bar0->adminq_event_counter); head = priv->adminq_prod_cnt; - if (tail != head) - // This is not a valid path - return -EINVAL; + if (tail != head) { + err = -EINVAL; + goto out; + } err = gve_adminq_issue_cmd(priv, cmd_orig); if (err) - return err; + goto out; - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; } /* The device specifies that the management vector can either be the first irq From 3cca098c91391b3fa48142bfda57048b985c87f6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 25 Jun 2024 19:51:09 +0300 Subject: [PATCH 110/374] wifi: iwlwifi: mvm: use IWL_FW_CHECK for link ID check [ Upstream commit 9215152677d4b321801a92b06f6d5248b2b4465f ] The lookup function iwl_mvm_rcu_fw_link_id_to_link_conf() is normally called with input from the firmware, so it should use IWL_FW_CHECK() instead of WARN_ON(). 
Signed-off-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://patch.msgid.link/20240625194805.4ea8fb7c47d4.I1c22af213f97f69bfc14674502511c1bc504adfb@changeid Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ded094b6b63d..bc40242aaadd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1442,7 +1442,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) static inline struct ieee80211_bss_conf * iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) { - if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf), + "erroneous FW link ID: %d\n", link_id)) return NULL; if (rcu) From 925fd8ee80d5348a5e965548e5484d164d19221d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 20 Jun 2024 12:52:17 +0200 Subject: [PATCH 111/374] udf: Avoid excessive partition lengths [ Upstream commit ebbe26fd54a9621994bc16b14f2ba8f84c089693 ] Avoid mounting filesystems where the partition would overflow the 32-bits used for block number. Also refuse to mount filesystems where the partition length is so large we cannot safely index bits in a block bitmap. Link: https://patch.msgid.link/20240620130403.14731-1-jack@suse.cz Signed-off-by: Jan Kara Signed-off-by: Sasha Levin --- fs/udf/super.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/fs/udf/super.c b/fs/udf/super.c index 92d477053905..3460ecc826d1 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1111,12 +1111,19 @@ static int udf_fill_partdesc_info(struct super_block *sb, struct udf_part_map *map; struct udf_sb_info *sbi = UDF_SB(sb); struct partitionHeaderDesc *phd; + u32 sum; int err; map = &sbi->s_partmaps[p_index]; map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); + if (check_add_overflow(map->s_partition_root, map->s_partition_len, + &sum)) { + udf_err(sb, "Partition %d has invalid location %u + %u\n", + p_index, map->s_partition_root, map->s_partition_len); + return -EFSCORRUPTED; + } if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; @@ -1172,6 +1179,14 @@ static int udf_fill_partdesc_info(struct super_block *sb, bitmap->s_extPosition = le32_to_cpu( phd->unallocSpaceBitmap.extPosition); map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; + /* Check whether math over bitmap won't overflow. */ + if (check_add_overflow(map->s_partition_len, + sizeof(struct spaceBitmapDesc) << 3, + &sum)) { + udf_err(sb, "Partition %d is too long (%u)\n", p_index, + map->s_partition_len); + return -EFSCORRUPTED; + } udf_debug("unallocSpaceBitmap (part %d) @ %u\n", p_index, bitmap->s_extPosition); } From 90cc74552a7306a9c4c09bfd63280153016037ff Mon Sep 17 00:00:00 2001 From: Konstantin Komarov Date: Thu, 30 May 2024 10:55:12 +0300 Subject: [PATCH 112/374] fs/ntfs3: One more reason to mark inode bad [ Upstream commit a0dde5d7a58b6bf9184ef3d8c6e62275c3645584 ] In addition to returning an error, mark the node as bad. 
Signed-off-by: Konstantin Komarov Signed-off-by: Sasha Levin --- fs/ntfs3/frecord.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index ded451a84b77..7a73df871037 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni) asize = le32_to_cpu(attr->size); roff = le16_to_cpu(attr->nres.run_off); - if (roff > asize) + if (roff > asize) { + _ntfs_bad_inode(&ni->vfs_inode); return -EINVAL; + } /* run==1 means unpack and deallocate. */ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, From f80c65d1e9a69a66baff82765b74c6c6d934e2f4 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 27 Mar 2024 09:04:42 -0700 Subject: [PATCH 113/374] riscv: kprobes: Use patch_text_nosync() for insn slots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b1756750a397f36ddc857989d31887c3f5081fb0 ] These instructions are not yet visible to the rest of the system, so there is no need to do the whole stop_machine() dance. Reviewed-by: Björn Töpel Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20240327160520.791322-4-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/kernel/probes/kprobes.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index dfb28e57d900..03cd103b8449 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -29,9 +29,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p) p->ainsn.api.restore = (unsigned long)p->addr + offset; - patch_text(p->ainsn.api.insn, &p->opcode, 1); - patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), - &insn, 1); + patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1); + patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1); } static void __kprobes arch_prepare_simulate(struct kprobe *p) From 606cd754cad793cb6310a020a3e2ff4d5dd9c9a7 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 26 Jun 2024 12:59:13 +0200 Subject: [PATCH 114/374] media: vivid: fix wrong sizeimage value for mplane [ Upstream commit 0fd7c0c2c156270dceb8c15fad3120cdce03e539 ] In several places a division by fmt->vdownsampling[p] was missing in the sizeimage[p] calculation, causing incorrect behavior for multiplanar formats were some planes are smaller than the first plane. Found by new v4l2-compliance tests. 
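For illustration only (not part of the patch): a worked example of the corrected per-plane size calculation, sizeimage[p] = bytesperline[p] * height / vdownsampling[p] + data_offset[p], using invented numbers for a format whose second plane is vertically downsampled.

#include <stdio.h>

int main(void)
{
        const unsigned int height = 480;
        const unsigned int bytesperline[2] = { 640, 640 };
        const unsigned int vdownsampling[2] = { 1, 2 };  /* plane 1 is half height */
        const unsigned int data_offset[2] = { 0, 0 };

        for (int p = 0; p < 2; p++)
                printf("plane %d: %u bytes\n", p,
                       bytesperline[p] * height / vdownsampling[p] +
                       data_offset[p]);
        return 0;
}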
Signed-off-by: Hans Verkuil Signed-off-by: Sasha Levin --- drivers/media/test-drivers/vivid/vivid-vid-cap.c | 5 +++-- drivers/media/test-drivers/vivid/vivid-vid-out.c | 16 +++++++++------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 2804975fe278..3a3041a0378f 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -106,8 +106,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, if (*nplanes != buffers) return -EINVAL; for (p = 0; p < buffers; p++) { - if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + - dev->fmt_cap->data_offset[p]) + if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h / + dev->fmt_cap->vdownsampling[p] + + dev->fmt_cap->data_offset[p]) return -EINVAL; } } else { diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c index 1653b2988f7e..7a0f4c61ac80 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-out.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c @@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq, if (sizes[0] < size) return -EINVAL; for (p = 1; p < planes; p++) { - if (sizes[p] < dev->bytesperline_out[p] * h + - vfmt->data_offset[p]) + if (sizes[p] < dev->bytesperline_out[p] * h / + vfmt->vdownsampling[p] + + vfmt->data_offset[p]) return -EINVAL; } } else { for (p = 0; p < planes; p++) - sizes[p] = p ? dev->bytesperline_out[p] * h + - vfmt->data_offset[p] : size; + sizes[p] = p ? dev->bytesperline_out[p] * h / + vfmt->vdownsampling[p] + + vfmt->data_offset[p] : size; } *nplanes = planes; @@ -124,7 +126,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb) for (p = 0; p < planes; p++) { if (p) - size = dev->bytesperline_out[p] * h; + size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; size += vb->planes[p].data_offset; if (vb2_get_plane_payload(vb, p) < size) { @@ -331,8 +333,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv, for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; mp->plane_fmt[p].sizeimage = - mp->plane_fmt[p].bytesperline * mp->height + - fmt->data_offset[p]; + mp->plane_fmt[p].bytesperline * mp->height / + fmt->vdownsampling[p] + fmt->data_offset[p]; } for (p = fmt->buffers; p < fmt->planes; p++) { unsigned stride = dev->bytesperline_out[p]; From 8aa2cd9f37d7e9c61d8d7bd78c86ac9af720c1fd Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 6 Jun 2024 20:29:18 +0300 Subject: [PATCH 115/374] leds: spi-byte: Call of_node_put() on error path [ Upstream commit 7f9ab862e05c5bc755f65bf6db7edcffb3b49dfc ] Add a missing call to of_node_put(np) on error. 
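For illustration only (not part of the patch): a minimal userspace sketch of the rule the fix restores, namely that whoever takes a reference must drop it on every exit path, error or success. get_ref()/put_ref() are invented stand-ins for of_get_next_available_child()/of_node_put().

#include <stdio.h>
#include <stdlib.h>

struct ref {
        int count;
};

static struct ref *get_ref(void)
{
        struct ref *r = malloc(sizeof(*r));

        if (r)
                r->count = 1;
        return r;
}

static void put_ref(struct ref *r)
{
        if (r && --r->count == 0)
                free(r);
}

static int probe(int fail)
{
        struct ref *child = get_ref();

        if (!child)
                return -1;
        if (fail) {
                put_ref(child);         /* error path drops the reference */
                return -1;
        }
        put_ref(child);                 /* success path drops it as well */
        return 0;
}

int main(void)
{
        printf("%d %d\n", probe(0), probe(1));
        return 0;
}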
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240606173037.3091598-2-andriy.shevchenko@linux.intel.com Signed-off-by: Lee Jones Signed-off-by: Sasha Levin --- drivers/leds/leds-spi-byte.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c index 96296db5f410..b04cf502e603 100644 --- a/drivers/leds/leds-spi-byte.c +++ b/drivers/leds/leds-spi-byte.c @@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi) dev_err(dev, "Device must have exactly one LED sub-node."); return -EINVAL; } - child = of_get_next_available_child(dev_of_node(dev), NULL); led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); if (!led) @@ -104,11 +103,13 @@ static int spi_byte_probe(struct spi_device *spi) led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value; led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking; + child = of_get_next_available_child(dev_of_node(dev), NULL); state = of_get_property(child, "default-state", NULL); if (state) { if (!strcmp(state, "on")) { led->ldev.brightness = led->ldev.max_brightness; } else if (strcmp(state, "off")) { + of_node_put(child); /* all other cases except "off" */ dev_err(dev, "default-state can only be 'on' or 'off'"); return -EINVAL; @@ -123,9 +124,12 @@ static int spi_byte_probe(struct spi_device *spi) ret = devm_led_classdev_register_ext(&spi->dev, &led->ldev, &init_data); if (ret) { + of_node_put(child); mutex_destroy(&led->mutex); return ret; } + + of_node_put(child); spi_set_drvdata(spi, led); return 0; From d45b1c62d8aff0b98d9c1e64c75cc87299597d00 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Mon, 17 Jun 2024 14:26:09 +0200 Subject: [PATCH 116/374] wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3 [ Upstream commit dbb5265a5d7cca1cdba7736dba313ab7d07bc19d ] After being asked about support for WPA3 for BCM43224 chipset it was found that all it takes is setting the MFP_CAPABLE flag and mac80211 will take care of all that is needed [1]. Link: https://lore.kernel.org/linux-wireless/20200526155909.5807-2-Larry.Finger@lwfinger.net/ [1] Signed-off-by: Arend van Spriel Tested-by: Reijer Boekhoff Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240617122609.349582-1-arend.vanspriel@broadcom.com Signed-off-by: Sasha Levin --- drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 92860dc0a92e..676604cb5a22 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw) ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, MFP_CAPABLE); hw->extra_tx_headroom = brcms_c_get_header_len(); hw->queues = N_TX_QUEUES; From 08155329af595a2a05bd7f59976df7d878cd8113 Mon Sep 17 00:00:00 2001 From: Chih-Kang Chang Date: Thu, 20 Jun 2024 13:58:23 +0800 Subject: [PATCH 117/374] wifi: rtw89: wow: prevent to send unexpected H2C during download Firmware [ Upstream commit 60757f28408bcc63c4c0676b2a69a38adce30fc7 ] While downloading Firmware in the resume flow, it is possible to receive beacon and send H2C to Firmware. However, if Firmware receives unexpected H2C during the download process, it will fail. 
Therefore, we prevent to send unexpected H2C during download Firmware in WoWLAN mode. Signed-off-by: Chih-Kang Chang Signed-off-by: Ping-Ke Shih Link: https://patch.msgid.link/20240620055825.17592-6-pkshih@realtek.com Signed-off-by: Sasha Levin --- drivers/net/wireless/realtek/rtw89/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index ddc390d24ec1..ddf45828086d 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1917,7 +1917,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, return; if (ieee80211_is_beacon(hdr->frame_control)) { - if (vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION && + !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) { rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len); rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu); } From d66ec9855b21949de98806a1bdc7452cbd4559e6 Mon Sep 17 00:00:00 2001 From: Shantanu Goel Date: Thu, 6 Jun 2024 23:32:57 -0400 Subject: [PATCH 118/374] usb: uas: set host status byte on data completion error [ Upstream commit 9d32685a251a754f1823d287df233716aa23bcb9 ] Set the host status byte when a data completion error is encountered otherwise the upper layer may end up using the invalid zero'ed data. The following output was observed from scsi/sd.c prior to this fix. [ 11.872824] sd 0:0:0:1: [sdf] tag#9 data cmplt err -75 uas-tag 1 inflight: [ 11.872826] sd 0:0:0:1: [sdf] tag#9 CDB: Read capacity(16) 9e 10 00 00 00 00 00 00 00 00 00 00 00 20 00 00 [ 11.872830] sd 0:0:0:1: [sdf] Sector size 0 reported, assuming 512. Signed-off-by: Shantanu Goel Acked-by: Oliver Neukum Link: https://lore.kernel.org/r/87msnx4ec6.fsf@yahoo.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/storage/uas.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index b610a2de4ae5..a04b4cb1382d 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -423,6 +423,7 @@ static void uas_data_cmplt(struct urb *urb) uas_log_cmd_state(cmnd, "data cmplt err", status); /* error: no data transfered */ scsi_set_resid(cmnd, sdb->length); + set_host_byte(cmnd, DID_ERROR); } else { scsi_set_resid(cmnd, sdb->length - urb->actual_length); } From 6fe9ca2ca389114c8da66e534c18273497843e8a Mon Sep 17 00:00:00 2001 From: Ma Ke Date: Tue, 25 Jun 2024 10:23:06 +0800 Subject: [PATCH 119/374] usb: gadget: aspeed_udc: validate endpoint index for ast udc [ Upstream commit ee0d382feb44ec0f445e2ad63786cd7f3f6a8199 ] We should verify the bound of the array to assure that host may not manipulate the index to point past endpoint array. Found by static analysis. 
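For illustration only (not part of the patch): a minimal sketch of bounds-checking an index supplied by the other end of the bus before using it to index a fixed-size array, which is the pattern the endpoint-number check adds. Array size and names are invented.

#include <stdio.h>

#define NUM_ENDPOINTS 21

static int ep_stopped[NUM_ENDPOINTS];

static int get_ep_status(unsigned int epnum, int *status)
{
        if (epnum >= NUM_ENDPOINTS)
                return -1;              /* reject an out-of-range index */
        *status = ep_stopped[epnum];
        return 0;
}

int main(void)
{
        int status;

        printf("valid index: %d\n", get_ep_status(3, &status));
        printf("bogus index: %d\n", get_ep_status(200, &status));
        return 0;
}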
Signed-off-by: Ma Ke Reviewed-by: Andrew Jeffery Acked-by: Andrew Jeffery Link: https://lore.kernel.org/r/20240625022306.2568122-1-make24@iscas.ac.cn Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/gadget/udc/aspeed_udc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c index 821a6ab5da56..f4781e611aaa 100644 --- a/drivers/usb/gadget/udc/aspeed_udc.c +++ b/drivers/usb/gadget/udc/aspeed_udc.c @@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc) break; case USB_RECIP_ENDPOINT: epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= AST_UDC_NUM_ENDPOINTS) + goto stall; status = udc->ep[epnum].stopped; break; default: From c1b85157d4565487c91a57e3daca4f106031c167 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 21 Jun 2024 11:25:30 +0800 Subject: [PATCH 120/374] drm/amdgpu: Fix register access violation [ Upstream commit 9da0f7736763aa0fbf63bb15060c6827135f3f67 ] fault_status is read only register. fault_cntl is not accessible from guest environment. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 8 +++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 77df8c9cbad2..9e10e552952e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -627,9 +627,11 @@ static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev, status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS); fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - /* reset page fault status */ - WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), - regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + if (!amdgpu_sriov_vf(adev)) { + /* clear page fault status and address */ + WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), + regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1); + } return fed; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f7f492475102..bd55a7e43f07 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -671,7 +671,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + if (!amdgpu_sriov_vf(adev)) + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 7a1ff298417a..8d7267a013d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -566,9 +566,11 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS); fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - /* reset page fault status */ - WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, - regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + if (!amdgpu_sriov_vf(adev)) { + /* clear page fault status and address */ + WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + } return fed; 
} From adc74d25cdbba978afbb57caec23bbcd0329f7b8 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Fri, 7 Jun 2024 09:21:30 -0600 Subject: [PATCH 121/374] drm/amd/display: Run DC_LOG_DC after checking link->link_enc [ Upstream commit 3a82f62b0d9d7687eac47603bb6cd14a50fa718b ] [WHAT] The DC_LOG_DC should be run after link->link_enc is checked, not before. This fixes 1 REVERSE_INULL issue reported by Coverity. Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index cf22b8f28ba6..72df9bdfb23f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -611,14 +611,14 @@ static bool construct_phy(struct dc_link *link, link->link_enc = link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); - DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); - DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); - if (!link->link_enc) { DC_ERROR("Failed to create link encoder!\n"); goto link_enc_create_fail; } + DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); + DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); + /* Update link encoder tracking variables. These are used for the dynamic * assignment of link encoders to streams. */ From 47e637193f9a7c4ef1b30f43c015ae7b81658704 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 11 Jun 2024 10:36:49 -0600 Subject: [PATCH 122/374] drm/amd/display: Check HDCP returned status [ Upstream commit 5d93060d430b359e16e7c555c8f151ead1ac614b ] [WHAT & HOW] Check mod_hdcp_execute_and_set() return values in authenticated_dp. This fixes 3 CHECKED_RETURN issues reported by Coverity. 
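For illustration only (not part of the patch): a minimal sketch of the class of issue fixed here, where a helper reports failure through its return value and later steps must not run if that return is ignored. The helper and its semantics are made up.

#include <stdbool.h>
#include <stdio.h>

static bool read_status(int *status)
{
        *status = 0;            /* 0 means "still authenticated" here */
        return true;            /* false would mean the read itself failed */
}

static int check_link(void)
{
        int status = 0;

        if (!read_status(&status))      /* stop if the helper itself failed */
                goto out;
        /* ... further checks that rely on a valid 'status' ... */
out:
        return status;
}

int main(void)
{
        printf("%d\n", check_link());
        return 0;
}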
Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- .../amd/display/modules/hdcp/hdcp1_execution.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 182e7532dda8..d77836cef563 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, } if (status == MOD_HDCP_STATUS_SUCCESS) - mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, &input->bstatus_read, &status, - hdcp, "bstatus_read"); + hdcp, "bstatus_read")) + goto out; if (status == MOD_HDCP_STATUS_SUCCESS) - mod_hdcp_execute_and_set(check_link_integrity_dp, + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, &input->link_integrity_check, &status, - hdcp, "link_integrity_check"); + hdcp, "link_integrity_check")) + goto out; if (status == MOD_HDCP_STATUS_SUCCESS) - mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, - hdcp, "reauth_request_check"); + hdcp, "reauth_request_check")) + goto out; out: return status; } From 5639a3048c7079803256374204ad55ec52cd0b49 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 11 Jun 2024 11:45:42 -0600 Subject: [PATCH 123/374] drm/amd/display: Validate function returns [ Upstream commit 673f816b9e1e92d1f70e1bf5f21b531e0ff9ad6c ] [WHAT & HOW] Function return values must be checked before data can be used in subsequent functions. This fixes 4 CHECKED_RETURN issues reported by Coverity. Reviewed-by: Harry Wentland Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7 +++++-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 3 ++- .../drm/amd/display/dc/link/protocols/link_dp_training.c | 3 +-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 2293a92df3be..22d2ab8ce7f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -245,7 +245,9 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (status == DMUB_STATUS_POWER_STATE_D3) return false; - dmub_srv_wait_for_idle(dmub, 100000); + status = dmub_srv_wait_for_idle(dmub, 100000); + if (status != DMUB_STATUS_OK) + return false; /* Requeue the command. 
*/ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); @@ -511,7 +513,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi union dmub_rb_cmd cmd = { 0 }; unsigned int panel_inst = 0; - dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); + if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst)) + return; memset(&cmd, 0, sizeof(cmd)); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index c6f859871d11..7e4ca2022d64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -595,7 +595,8 @@ static bool hubbub2_program_watermarks( hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false) safe_to_lower = true; - hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index b8e704dbe956..8c0dea6f75bf 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1659,8 +1659,7 @@ bool perform_link_training_with_retries( if (status == LINK_TRAINING_ABORT) { enum dc_connection_type type = dc_connection_none; - link_detect_connection_type(link, &type); - if (type == dc_connection_none) { + if (link_detect_connection_type(link, &type) && type == dc_connection_none) { DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); break; } From 9738a1d834fb0657a6d351a30d3dcd84be1c57eb Mon Sep 17 00:00:00 2001 From: Bob Zhou Date: Thu, 20 Jun 2024 15:40:06 +0800 Subject: [PATCH 124/374] drm/amdgpu: add missing error handling in function amdgpu_gmc_flush_gpu_tlb_pasid [ Upstream commit 9ff2e14cf013fa887e269bdc5ea3cffacada8635 ] Fix the unchecked return value warning reported by Coverity, so add error handling. 
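For illustration only (not part of the patch): a rough userspace analogue of the added error handling, where a failure of a call made under a lock must release that lock before returning. The lock, helper, and threshold are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int ring_alloc(int ndw)
{
        return ndw > 64 ? -1 : 0;       /* pretend large requests fail */
}

static int flush_tlb(int ndw)
{
        int r;

        pthread_mutex_lock(&ring_lock);
        r = ring_alloc(ndw);
        if (r) {
                pthread_mutex_unlock(&ring_lock);       /* don't leak the lock */
                return r;
        }
        /* ... emit the invalidation commands ... */
        pthread_mutex_unlock(&ring_lock);
        return 0;
}

int main(void)
{
        printf("%d %d\n", flush_tlb(16), flush_tlb(128));
        return 0;
}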
Signed-off-by: Bob Zhou Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 86b096ad0319..f4478f2d5305 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -720,7 +720,11 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, ndw += kiq->pmf->invalidate_tlbs_size; spin_lock(&adev->gfx.kiq[inst].ring_lock); - amdgpu_ring_alloc(ring, ndw); + r = amdgpu_ring_alloc(ring, ndw); + if (r) { + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + goto error_unlock_reset; + } if (adev->gmc.flush_tlb_needs_extra_type_2) kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); From c2056c7a840f0dbf293bc3b0d91826d001668fb0 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 21 Jun 2024 17:53:30 +0800 Subject: [PATCH 125/374] drm/amdgpu: Fix smatch static checker warning [ Upstream commit bdbdc7cecd00305dc844a361f9883d3a21022027 ] adev->gfx.imu.funcs could be NULL Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index ad6431013c73..4ba8eb45ac17 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4293,11 +4293,11 @@ static int gfx_v11_0_hw_init(void *handle) /* RLC autoload sequence 1: Program rlc ram */ if (adev->gfx.imu.funcs->program_rlc_ram) adev->gfx.imu.funcs->program_rlc_ram(adev); + /* rlc autoload firmware */ + r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); + if (r) + return r; } - /* rlc autoload firmware */ - r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); - if (r) - return r; } else { if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { From 47d7b9a56deb14856676b63bae163b9f17dad320 Mon Sep 17 00:00:00 2001 From: Danijel Slivka Date: Mon, 24 Jun 2024 07:58:24 +0200 Subject: [PATCH 126/374] drm/amdgpu: clear RB_OVERFLOW bit when enabling interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit afbf7955ff01e952dbdd465fa25a2ba92d00291c ] Why: Setting IH_RB_WPTR register to 0 will not clear the RB_OVERFLOW bit if RB_ENABLE is not set. How to fix: Set WPTR_OVERFLOW_CLEAR bit after RB_ENABLE bit is set. The RB_ENABLE bit is required to be set, together with WPTR_OVERFLOW_ENABLE bit so that setting WPTR_OVERFLOW_CLEAR bit would clear the RB_OVERFLOW. Signed-off-by: Danijel Slivka Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 3cb64c8f7175..18a761d6ef33 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 
1 : 0)); + + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); From 9f216ceb125c38275559782127498ca64fe6ab83 Mon Sep 17 00:00:00 2001 From: Jiwei Sun Date: Thu, 20 Jun 2024 16:51:10 +0800 Subject: [PATCH 127/374] crypto: qat - initialize user_input.lock for rate_limiting [ Upstream commit ccacbbc3176277bbfc324f85fa827d1a2656bedf ] If the following configurations are set, CONFIG_DEBUG_RWSEMS=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_RWSEM_SPIN_ON_OWNER=y And run the following command, [root@localhost sys]# cat /sys/devices/pci0000:6b/0000:6b:00.0/qat_rl/pir The following warning log appears, ------------[ cut here ]------------ DEBUG_RWSEMS_WARN_ON(sem->magic != sem): count = 0x0, magic = 0x0, owner = 0x1, curr 0xff11000119288040, list not empty WARNING: CPU: 131 PID: 1254984 at kernel/locking/rwsem.c:1280 down_read+0x439/0x7f0 CPU: 131 PID: 1254984 Comm: cat Kdump: loaded Tainted: G W 6.10.0-rc4+ #86 b2ae60c8ceabed15f4fd2dba03c1c5a5f7f4040c Hardware name: Lenovo ThinkServer SR660 V3/SR660 V3, BIOS T8E166X-2.54 05/30/2024 RIP: 0010:down_read+0x439/0x7f0 Code: 44 24 10 80 3c 02 00 0f 85 05 03 00 00 48 8b 13 41 54 48 c7 c6 a0 3e 0e b4 48 c7 c7 e0 3e 0e b4 4c 8b 4c 24 08 e8 77 d5 40 fd <0f> 0b 59 e9 bc fc ff ff 0f 1f 44 00 00 e9 e2 fd ff ff 4c 8d 7b 08 RSP: 0018:ffa0000035f67a78 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ff1100012b03a658 RCX: 0000000000000000 RDX: 0000000080000002 RSI: 0000000000000008 RDI: 0000000000000001 RBP: 1ff4000006becf53 R08: fff3fc0006becf17 R09: fff3fc0006becf17 R10: fff3fc0006becf16 R11: ffa0000035f678b7 R12: ffffffffb40e3e60 R13: ffffffffb627d1f4 R14: ff1100012b03a6d0 R15: ff1100012b03a6c8 FS: 00007fa9ff9a6740(0000) GS:ff1100081e600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fa9ff984000 CR3: 00000002118ae006 CR4: 0000000000771ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: pir_show+0x5d/0xe0 [intel_qat 9e297e249ab040329cf58b657b06f418fd5c5855] dev_attr_show+0x3f/0xc0 sysfs_kf_seq_show+0x1ce/0x400 seq_read_iter+0x3fa/0x10b0 vfs_read+0x6f5/0xb20 ksys_read+0xe9/0x1d0 do_syscall_64+0x8a/0x170 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7fa9ff6fd9b2 Code: c0 e9 b2 fe ff ff 50 48 8d 3d ea 1d 0c 00 e8 c5 fd 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24 RSP: 002b:00007ffc0616b968 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 
00007fa9ff6fd9b2 RDX: 0000000000020000 RSI: 00007fa9ff985000 RDI: 0000000000000003 RBP: 00007fa9ff985000 R08: 00007fa9ff984010 R09: 0000000000000000 R10: 0000000000000022 R11: 0000000000000246 R12: 0000000000022000 R13: 0000000000000003 R14: 0000000000020000 R15: 0000000000020000 irq event stamp: 0 hardirqs last enabled at (0): [<0000000000000000>] 0x0 hardirqs last disabled at (0): [] copy_process+0x21e6/0x6e70 softirqs last enabled at (0): [] copy_process+0x2236/0x6e70 softirqs last disabled at (0): [<0000000000000000>] 0x0 ---[ end trace 0000000000000000 ]--- The rate_limiting->user_input.lock rwsem lock is not initialized before use. Let's initialize it. Signed-off-by: Jiwei Sun Reviewed-by: Adrian Huang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 346ef8bee99d..e782c23fc1bf 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -1106,6 +1106,7 @@ int adf_rl_init(struct adf_accel_dev *accel_dev) mutex_init(&rl->rl_lock); rl->device_data = &accel_dev->hw_device->rl_data; rl->accel_dev = accel_dev; + init_rwsem(&rl->user_input.lock); accel_dev->rate_limiting = rl; err_ret: From 4fd7a0513d673e22edaf91357587c46e29a19b34 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Mon, 24 Jun 2024 12:52:59 +0300 Subject: [PATCH 128/374] media: vivid: don't set HDMI TX controls if there are no HDMI outputs [ Upstream commit 17763960b1784578e8fe915304b330922f646209 ] When setting the EDID it would attempt to update two controls that are only present if there is an HDMI output configured. If there isn't any (e.g. when the vivid module is loaded with node_types=1), then calling VIDIOC_S_EDID would crash. Fix this by first checking if outputs are present. 
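As a rough illustration of how the broken path is reached, a plain VIDIOC_S_EDID call is enough; the sketch below is an editorial example and not part of the patch (the device node, the HDMI input pad index and the node_types value are assumptions about the vivid configuration).

/* Editorial sketch: exercise the VIDIOC_S_EDID path guarded by the fix.
 * Assumes vivid was loaded with node_types=0x1 (capture node only, so no
 * HDMI outputs exist), that it registered as /dev/video0, and that pad 3
 * is an HDMI input in the configured input set.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_edid edid;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&edid, 0, sizeof(edid));
	edid.pad = 3;		/* assumed HDMI input pad */
	edid.blocks = 0;	/* "clear EDID": one of the branches fixed below */
	if (ioctl(fd, VIDIOC_S_EDID, &edid))
		perror("VIDIOC_S_EDID");
	return 0;
}

Before the fix this would update the TX controls that were never created; with the fix the control updates are simply skipped when dev->num_outputs is zero.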
Signed-off-by: Hans Verkuil Signed-off-by: Sasha Levin --- drivers/media/test-drivers/vivid/vivid-vid-cap.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 3a3041a0378f..afa0dc5bcdae 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -1554,8 +1554,10 @@ int vidioc_s_edid(struct file *file, void *_fh, return -EINVAL; if (edid->blocks == 0) { dev->edid_blocks = 0; - v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); - v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); + if (dev->num_outputs) { + v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0); + v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0); + } phys_addr = CEC_PHYS_ADDR_INVALID; goto set_phys_addr; } @@ -1579,8 +1581,10 @@ int vidioc_s_edid(struct file *file, void *_fh, display_present |= dev->display_present[i] << j++; - v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); - v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); + if (dev->num_outputs) { + v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present); + v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present); + } set_phys_addr: /* TODO: a proper hotplug detect cycle should be emulated here */ From e37e875e40b5e18c2f7fe6af0baf84f7ef7f27f8 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Mon, 24 Jun 2024 12:38:58 +0000 Subject: [PATCH 129/374] vfio/spapr: Always clear TCEs before unsetting the window [ Upstream commit 4ba2fdff2eb174114786784926d0efb6903c88a6 ] The PAPR expects the TCE table to have no entries at the time of unset window(i.e. remove-pe). The TCE clear right now is done before freeing the iommu table. On pSeries, the unset window makes those entries inaccessible to the OS and the H_PUT/GET calls fail on them with H_CONSTRAINED. On PowerNV, this has no side effect as the TCE clear can be done before the DMA window removal as well. 
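Summarised as a sketch (editorial restatement using the names from the diff that follows, not additional kernel code), the window-removal order after this change is:

/* Editorial sketch of the ordering enforced by the patch: PAPR expects the
 * TCE table to be empty before the window is unset, otherwise later
 * H_PUT_TCE/H_GET_TCE calls on the stale entries fail with H_CONSTRAINED.
 */
static void remove_window_order_sketch(struct tce_container *container,
				       struct iommu_table_group *table_group,
				       struct iommu_table *tbl, int num)
{
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);	/* 1. empty the table */
	table_group->ops->unset_window(table_group, num);		/* 2. unset the window (per group) */
	tce_iommu_free_table(container, tbl);				/* 3. only then free it */
}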
Signed-off-by: Shivaprasad G Bhat Signed-off-by: Michael Ellerman Link: https://msgid.link/171923273535.1397.1236742071894414895.stgit@linux.ibm.com Signed-off-by: Sasha Levin --- drivers/vfio/vfio_iommu_spapr_tce.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index a94ec6225d31..5f9e7e477078 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data) if (!tbl) continue; - tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); tce_iommu_free_table(container, tbl); } @@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container, BUG_ON(!tbl->it_size); + tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + /* Detach groups from IOMMUs */ list_for_each_entry(tcegrp, &container->group_list, next) { table_group = iommu_group_get_iommudata(tcegrp->grp); @@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container, } /* Free table */ - tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); tce_iommu_free_table(container, tbl); container->tables[num] = NULL; @@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container, return; } - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) - if (container->tables[i]) + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + if (container->tables[i]) { + tce_iommu_clear(container, container->tables[i], + container->tables[i]->it_offset, + container->tables[i]->it_size); table_group->ops->unset_window(table_group, i); + } + } } static long tce_iommu_take_ownership(struct tce_container *container, From 15c770640bcbf7778a43c48c0979224777f1fbdd Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 10 Jun 2024 14:49:54 +0200 Subject: [PATCH 130/374] fs: don't copy to userspace under namespace semaphore [ Upstream commit cb54ef4f050e7c504ed87114276a296d727e918a ] Don't copy mount ids to userspace while holding the namespace semaphore. We really shouldn't do that and I've gone through lengths avoiding that in statmount() already. Limit the number of mounts that can be retrieved in one go to 1 million mount ids. That's effectively 10 times the default limit of 100000 mounts that we put on each mount namespace by default. Since listmount() is an iterator limiting the number of mounts retrievable in one go isn't a problem as userspace can just pick up where they left off. Karel mentioned that libmount will probably be reading the mount table in "small steps, 512 nodes per request. Nobody likes a tool that takes too long in the kernel, and huge servers are unusual use cases. Libmount will very probably provide API to define size of the step (IDs per request)."
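To make the iteration model concrete, a userspace consumer stepping through the table 512 IDs at a time would look roughly like the sketch below; the mnt_id_req layout, LSMT_ROOT value and the x86-64 syscall number are assumptions taken from this kernel series' uapi headers, not something defined by this patch.

/* Editorial sketch: iterate a mount namespace 512 mount IDs at a time.
 * Assumes the listmount() uapi of this series: struct mnt_id_req with
 * size/spare/mnt_id/param, LSMT_ROOT, and __NR_listmount == 458 on
 * x86-64 (no glibc wrapper assumed).
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_listmount
#define __NR_listmount 458
#endif
#define LSMT_ROOT 0xffffffffffffffffULL	/* root of the mount namespace */

struct mnt_id_req_sketch {
	uint32_t size;
	uint32_t spare;
	uint64_t mnt_id;
	uint64_t param;		/* last mount ID seen; iteration resumes after it */
};

int main(void)
{
	uint64_t ids[512];
	struct mnt_id_req_sketch req = {
		.size = sizeof(req),
		.mnt_id = LSMT_ROOT,	/* list children of the namespace root */
		.param = 0,
	};
	long n;

	do {
		n = syscall(__NR_listmount, &req, ids, 512, 0);
		if (n < 0) {
			perror("listmount");
			return 1;
		}
		for (long i = 0; i < n; i++)
			printf("mount id %llu\n", (unsigned long long)ids[i]);
		if (n > 0)
			req.param = ids[n - 1];	/* pick up where we left off */
	} while (n == 512);

	return 0;
}

Because the interface is an iterator, the 1 million cap introduced below only bounds a single call; a caller resumes by passing the last ID it received in param.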
Reported-by: Mateusz Guzik Link: https://lore.kernel.org/r/20240610-frettchen-liberal-a9a5c53865f8@brauner Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/namespace.c | 98 ++++++++++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 42 deletions(-) diff --git a/fs/namespace.c b/fs/namespace.c index 5a51315c6678..57311ecbdf5a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5047,55 +5047,81 @@ static struct mount *listmnt_next(struct mount *curr) return node_to_mount(rb_next(&curr->mnt_node)); } -static ssize_t do_listmount(struct mount *first, struct path *orig, - u64 mnt_parent_id, u64 __user *mnt_ids, - size_t nr_mnt_ids, const struct path *root) +static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, + size_t nr_mnt_ids) { - struct mount *r; + struct path root; + struct mnt_namespace *ns = current->nsproxy->mnt_ns; + struct path orig; + struct mount *r, *first; ssize_t ret; + rwsem_assert_held(&namespace_sem); + + get_fs_root(current->fs, &root); + if (mnt_parent_id == LSMT_ROOT) { + orig = root; + } else { + orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); + if (!orig.mnt) { + ret = -ENOENT; + goto err; + } + orig.dentry = orig.mnt->mnt_root; + } + /* * Don't trigger audit denials. We just want to determine what * mounts to show users. */ - if (!is_path_reachable(real_mount(orig->mnt), orig->dentry, root) && - !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) - return -EPERM; + if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) && + !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) { + ret = -EPERM; + goto err; + } - ret = security_sb_statfs(orig->dentry); + ret = security_sb_statfs(orig.dentry); if (ret) - return ret; + goto err; + + if (!last_mnt_id) + first = node_to_mount(rb_first(&ns->mounts)); + else + first = mnt_find_id_at(ns, last_mnt_id + 1); for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r)) { if (r->mnt_id_unique == mnt_parent_id) continue; - if (!is_path_reachable(r, r->mnt.mnt_root, orig)) + if (!is_path_reachable(r, r->mnt.mnt_root, &orig)) continue; - if (put_user(r->mnt_id_unique, mnt_ids)) - return -EFAULT; + *mnt_ids = r->mnt_id_unique; mnt_ids++; nr_mnt_ids--; ret++; } +err: + path_put(&root); return ret; } -SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *, - mnt_ids, size_t, nr_mnt_ids, unsigned int, flags) +SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, + u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags) { - struct mnt_namespace *ns = current->nsproxy->mnt_ns; + u64 *kmnt_ids __free(kvfree) = NULL; + const size_t maxcount = 1000000; struct mnt_id_req kreq; - struct mount *first; - struct path root, orig; - u64 mnt_parent_id, last_mnt_id; - const size_t maxcount = (size_t)-1 >> 3; ssize_t ret; if (flags) return -EINVAL; + /* + * If the mount namespace really has more than 1 million mounts the + * caller must iterate over the mount namespace (and reconsider their + * system design...). 
+ */ if (unlikely(nr_mnt_ids > maxcount)) - return -EFAULT; + return -EOVERFLOW; if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids))) return -EFAULT; @@ -5103,33 +5129,21 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *, ret = copy_mnt_id_req(req, &kreq); if (ret) return ret; - mnt_parent_id = kreq.mnt_id; - last_mnt_id = kreq.param; - down_read(&namespace_sem); - get_fs_root(current->fs, &root); - if (mnt_parent_id == LSMT_ROOT) { - orig = root; - } else { - ret = -ENOENT; - orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); - if (!orig.mnt) - goto err; - orig.dentry = orig.mnt->mnt_root; - } - if (!last_mnt_id) - first = node_to_mount(rb_first(&ns->mounts)); - else - first = mnt_find_id_at(ns, last_mnt_id + 1); + kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids), + GFP_KERNEL_ACCOUNT); + if (!kmnt_ids) + return -ENOMEM; + + scoped_guard(rwsem_read, &namespace_sem) + ret = do_listmount(kreq.mnt_id, kreq.param, kmnt_ids, nr_mnt_ids); + + if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids))) + return -EFAULT; - ret = do_listmount(first, &orig, mnt_parent_id, mnt_ids, nr_mnt_ids, &root); -err: - path_put(&root); - up_read(&namespace_sem); return ret; } - static void __init init_mount_tree(void) { struct vfsmount *mnt; From dfd2e8eaa64bacbcbcb91980c36b8b9e93293f13 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 24 Jun 2024 11:49:45 -0400 Subject: [PATCH 131/374] fs: relax permissions for statmount() [ Upstream commit f3107df39df123328a9d3c8f40c006834b37287d ] It is sufficient to have capabilities in the owning user namespace of the mount namespace to stat a mount regardless of whether it's reachable or not. Link: https://lore.kernel.org/r/bf5961d71ec479ba85806766b0d8d96043e67bba.1719243756.git.josef@toxicpanda.com Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/namespace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/namespace.c b/fs/namespace.c index 57311ecbdf5a..4494064205a6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -4906,6 +4906,7 @@ static int copy_statmount_to_user(struct kstatmount *s) static int do_statmount(struct kstatmount *s) { struct mount *m = real_mount(s->mnt); + struct mnt_namespace *ns = m->mnt_ns; int err; /* @@ -4913,7 +4914,7 @@ static int do_statmount(struct kstatmount *s) * mounts to show users. */ if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) && - !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) + !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; err = security_sb_statfs(s->mnt->mnt_root); From 68d8156480940b79227d58865ec5d2947b9384a8 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 30 May 2024 19:44:12 -0500 Subject: [PATCH 132/374] powerpc/rtas: Prevent Spectre v1 gadget construction in sys_rtas() [ Upstream commit 0974d03eb479384466d828d65637814bee6b26d7 ] Smatch warns: arch/powerpc/kernel/rtas.c:1932 __do_sys_rtas() warn: potential spectre issue 'args.args' [r] (local cap) The 'nargs' and 'nret' locals come directly from a user-supplied buffer and are used as indexes into a small stack-based array and as inputs to copy_to_user() after they are subject to bounds checks. Use array_index_nospec() after the bounds checks to clamp these values for speculative execution. 
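For readers unfamiliar with the helper: a bounds check alone does not stop the CPU from speculatively executing the out-of-bounds path, so the index is clamped again after the check. A minimal kernel-style sketch of the pattern (illustrative only, not taken from the patch):

#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/types.h>

/* Editorial sketch: clamp a user-controlled index after the bounds check so
 * that even a mispredicted branch cannot speculatively read out of bounds.
 * array_index_nospec(idx, nr) returns idx if idx < nr and 0 otherwise,
 * without using a branch.
 */
static int read_entry(const u32 *table, size_t nr, size_t idx, u32 *out)
{
	if (idx >= nr)
		return -EINVAL;

	idx = array_index_nospec(idx, nr);
	*out = table[idx];
	return 0;
}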
Signed-off-by: Nathan Lynch Reported-by: Breno Leitao Reviewed-by: Breno Leitao Signed-off-by: Michael Ellerman Link: https://msgid.link/20240530-sys_rtas-nargs-nret-v1-1-129acddd4d89@linux.ibm.com Signed-off-by: Sasha Levin --- arch/powerpc/kernel/rtas.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8064d9c3de86..f7e86e09c49f 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1916,6 +1917,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) || nargs + nret > ARRAY_SIZE(args.args)) return -EINVAL; + nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args)); + nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs); + /* Copy in args. */ if (copy_from_user(args.args, uargs->args, nargs * sizeof(rtas_arg_t)) != 0) From 90afe40da0a5e2349eef3be906b37505ffa21664 Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Fri, 28 Jun 2024 02:10:12 +0000 Subject: [PATCH 133/374] seccomp: release task filters when the task exits [ Upstream commit bfafe5efa9754ebc991750da0bcca2a6694f3ed3 ] Previously, seccomp filters were released in release_task(), which required the process to exit and its zombie to be collected. However, exited threads/processes can't trigger any seccomp events, making it more logical to release filters upon task exits. This adjustment simplifies scenarios where a parent is tracing its child process. The parent process can now handle all events from a seccomp listening descriptor and then call wait to collect a child zombie. seccomp_filter_release takes the siglock to avoid races with seccomp_sync_threads. There was an idea to bypass taking the lock by checking PF_EXITING, but it can be set without holding siglock if threads have SIGNAL_GROUP_EXIT. This means it can happen concurently with seccomp_filter_release. This change also fixes another minor problem. Suppose that a group leader installs the new filter without SECCOMP_FILTER_FLAG_TSYNC, exits, and becomes a zombie. Without this change, SECCOMP_FILTER_FLAG_TSYNC from any other thread can never succeed, seccomp_can_sync_threads() will check a zombie leader and is_ancestor() will fail. Reviewed-by: Oleg Nesterov Signed-off-by: Andrei Vagin Link: https://lore.kernel.org/r/20240628021014.231976-3-avagin@google.com Reviewed-by: Tycho Andersen Signed-off-by: Kees Cook Signed-off-by: Sasha Levin --- kernel/exit.c | 3 ++- kernel/seccomp.c | 23 ++++++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index 81fcee45d630..be81342caf1b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -277,7 +277,6 @@ void release_task(struct task_struct *p) } write_unlock_irq(&tasklist_lock); - seccomp_filter_release(p); proc_flush_pid(thread_pid); put_pid(thread_pid); release_thread(p); @@ -834,6 +833,8 @@ void __noreturn do_exit(long code) io_uring_files_cancel(); exit_signals(tsk); /* sets PF_EXITING */ + seccomp_filter_release(tsk); + acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { diff --git a/kernel/seccomp.c b/kernel/seccomp.c index e30b60b57614..b02337e95664 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -502,6 +502,9 @@ static inline pid_t seccomp_can_sync_threads(void) /* Skip current, since it is initiating the sync. */ if (thread == caller) continue; + /* Skip exited threads. 
*/ + if (thread->flags & PF_EXITING) + continue; if (thread->seccomp.mode == SECCOMP_MODE_DISABLED || (thread->seccomp.mode == SECCOMP_MODE_FILTER && @@ -563,18 +566,21 @@ static void __seccomp_filter_release(struct seccomp_filter *orig) * @tsk: task the filter should be released from. * * This function should only be called when the task is exiting as - * it detaches it from its filter tree. As such, READ_ONCE() and - * barriers are not needed here, as would normally be needed. + * it detaches it from its filter tree. PF_EXITING has to be set + * for the task. */ void seccomp_filter_release(struct task_struct *tsk) { - struct seccomp_filter *orig = tsk->seccomp.filter; + struct seccomp_filter *orig; - /* We are effectively holding the siglock by not having any sighand. */ - WARN_ON(tsk->sighand != NULL); + if (WARN_ON((tsk->flags & PF_EXITING) == 0)) + return; + spin_lock_irq(&tsk->sighand->siglock); + orig = tsk->seccomp.filter; /* Detach task from its filter tree. */ tsk->seccomp.filter = NULL; + spin_unlock_irq(&tsk->sighand->siglock); __seccomp_filter_release(orig); } @@ -602,6 +608,13 @@ static inline void seccomp_sync_threads(unsigned long flags) if (thread == caller) continue; + /* + * Skip exited threads. seccomp_filter_release could have + * been already called for this task. + */ + if (thread->flags & PF_EXITING) + continue; + /* Get a task reference for the new leaf node. */ get_seccomp_filter(caller); From 9ae799838b290a46b77c2926c73a842565708abe Mon Sep 17 00:00:00 2001 From: Eric Joyner Date: Mon, 17 Jun 2024 14:46:25 +0200 Subject: [PATCH 134/374] ice: Check all ice_vsi_rebuild() errors in function [ Upstream commit d47bf9a495cf424fad674321d943123dc12b926d ] Check the return value from ice_vsi_rebuild() and prevent the usage of incorrectly configured VSI. Reviewed-by: Michal Swiatkowski Reviewed-by: Przemek Kitszel Signed-off-by: Eric Joyner Signed-off-by: Karen Ostrowska Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_main.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f16d13e9ff6e..253689dbf6c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4160,13 +4160,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); goto done; } ice_vsi_close(vsi); - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; ice_for_each_traffic_class(i) { if (vsi->tc_cfg.ena_tc & BIT(i)) @@ -4177,6 +4181,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); + goto done; + +rebuild_err: + dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. 
Unload and reload the driver.\n", + err); done: clear_bit(ICE_CFG_BUSY, pf->state); return err; From dd47051c76c8acd8cb983f01b4d1265da29cb66a Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Fri, 28 Jun 2024 13:45:29 +0200 Subject: [PATCH 135/374] PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 86f271f22bbb6391410a07e08d6ca3757fda01fa ] Errata #i2037 in AM65x/DRA80xM Processors Silicon Revision 1.0 (SPRZ452D_July 2018_Revised December 2019 [1]) mentions when an inbound PCIe TLP spans more than two internal AXI 128-byte bursts, the bus may corrupt the packet payload and the corrupt data may cause associated applications or the processor to hang. The workaround for Errata #i2037 is to limit the maximum read request size and maximum payload size to 128 bytes. Add workaround for Errata #i2037 here. The errata and workaround is applicable only to AM65x SR 1.0 and later versions of the silicon will have this fixed. [1] -> https://www.ti.com/lit/er/sprz452i/sprz452i.pdf Link: https://lore.kernel.org/linux-pci/16e1fcae-1ea7-46be-b157-096e05661b15@siemens.com Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Achal Verma Signed-off-by: Vignesh Raghavendra Signed-off-by: Jan Kiszka Signed-off-by: Krzysztof Wilczyński Reviewed-by: Siddharth Vadapalli Signed-off-by: Sasha Levin --- drivers/pci/controller/dwc/pci-keystone.c | 44 ++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index cd0e0022f91d..483c95406513 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -34,6 +34,11 @@ #define PCIE_DEVICEID_SHIFT 16 /* Application registers */ +#define PID 0x000 +#define RTL GENMASK(15, 11) +#define RTL_SHIFT 11 +#define AM6_PCI_PG1_RTL_VER 0x15 + #define CMD_STATUS 0x004 #define LTSSM_EN_VAL BIT(0) #define OB_XLAT_EN_VAL BIT(1) @@ -104,6 +109,8 @@ #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) +#define PCI_DEVICE_ID_TI_AM654X 0xb00c + struct ks_pcie_of_data { enum dw_pcie_device_mode mode; const struct dw_pcie_host_ops *host_ops; @@ -516,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci) static void ks_pcie_quirk(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; + struct keystone_pcie *ks_pcie; + struct device *bridge_dev; struct pci_dev *bridge; + u32 val; + static const struct pci_device_id rc_pci_devids[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, @@ -528,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev) .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { 0, }, }; + static const struct pci_device_id am6_pci_devids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { 0, }, + }; if (pci_is_root_bus(bus)) bridge = dev; @@ -549,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev) */ if (pci_match_id(rc_pci_devids, bridge)) { if (pcie_get_readrq(dev) > 256) { - dev_info(&dev->dev, "limiting MRRS to 256\n"); + dev_info(&dev->dev, "limiting MRRS to 256 bytes\n"); pcie_set_readrq(dev, 256); } } + + /* + * Memory transactions fail with PCI controller in AM654 PG1.0 + * when MRRS is set to more than 128 bytes. Force the MRRS to + * 128 bytes in all downstream devices. 
+ */ + if (pci_match_id(am6_pci_devids, bridge)) { + bridge_dev = pci_get_host_bridge_device(dev); + if (!bridge_dev && !bridge_dev->parent) + return; + + ks_pcie = dev_get_drvdata(bridge_dev->parent); + if (!ks_pcie) + return; + + val = ks_pcie_app_readl(ks_pcie, PID); + val &= RTL; + val >>= RTL_SHIFT; + if (val != AM6_PCI_PG1_RTL_VER) + return; + + if (pcie_get_readrq(dev) > 128) { + dev_info(&dev->dev, "limiting MRRS to 128 bytes\n"); + pcie_set_readrq(dev, 128); + } + } } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); From 4afc9cda8adefbd82414d34d8072264ebf79fceb Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 9 Jun 2024 16:47:53 -0700 Subject: [PATCH 136/374] Input: ili210x - use kvmalloc() to allocate buffer for firmware update [ Upstream commit 17f5eebf6780eee50f887542e1833fda95f53e4d ] Allocating a contiguous buffer of 64K may fail if memory is sufficiently fragmented, and may cause OOM kill of an unrelated process. However we do not need to have contiguous memory. We also do not need to zero out the buffer since it will be overwritten with firmware data. Switch to using kvmalloc() instead of kzalloc(). Link: https://lore.kernel.org/r/20240609234757.610273-1-dmitry.torokhov@gmail.com Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin --- drivers/input/touchscreen/ili210x.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 79bdb2b10949..f3c3ad70244f 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -597,7 +597,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw, * once, copy them all into this buffer at the right locations, and then * do all operations on this linear buffer. */ - fw_buf = kzalloc(SZ_64K, GFP_KERNEL); + fw_buf = kvmalloc(SZ_64K, GFP_KERNEL); if (!fw_buf) return -ENOMEM; @@ -627,7 +627,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw, return 0; err_big: - kfree(fw_buf); + kvfree(fw_buf); return error; } @@ -870,7 +870,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev, ili210x_hardware_reset(priv->reset_gpio); dev_dbg(dev, "Firmware update ended, error=%i\n", error); enable_irq(client->irq); - kfree(fwbuf); + kvfree(fwbuf); return error; } From b86c6b2ab1c725e6b47426f9a09de68ebaa7fce6 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Fri, 21 Jun 2024 09:35:22 +0800 Subject: [PATCH 137/374] media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse [ Upstream commit 4caf6d93d9f2c11d6441c64e1c549c445fa322ed ] Add check for the return value of v4l2_fwnode_endpoint_parse() and return the error if it fails in order to catch the error. 
Signed-off-by: Chen Ni Signed-off-by: Hans Verkuil Signed-off-by: Sasha Levin --- drivers/media/platform/qcom/camss/camss.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index 1923615f0eea..c90a28fa8891 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -1406,8 +1406,11 @@ static int camss_of_parse_endpoint_node(struct device *dev, struct v4l2_mbus_config_mipi_csi2 *mipi_csi2; struct v4l2_fwnode_endpoint vep = { { 0 } }; unsigned int i; + int ret; - v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); + ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); + if (ret) + return ret; csd->interface.csiphy_id = vep.base.port; From 67c07958facc0c15a7293143c48ddd8fd4c02fb9 Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Sun, 12 May 2024 23:31:21 +0100 Subject: [PATCH 138/374] pcmcia: Use resource_size function on resource object [ Upstream commit 24a025497e7e883bd2adef5d0ece1e9b9268009f ] Cocinnele reports a warning WARNING: Suspicious code. resource_size is maybe missing with root The root cause is the function resource_size is not used when needed Use resource_size() on variable "root" of type resource Signed-off-by: Jules Irenge Signed-off-by: Dominik Brodowski Signed-off-by: Sasha Levin --- drivers/pcmcia/yenta_socket.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 1365eaa20ff4..ff169124929c 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res, start = PCIBIOS_MIN_CARDBUS_IO; end = ~0U; } else { - unsigned long avail = root->end - root->start; + unsigned long avail = resource_size(root); int i; size = BRIDGE_MEM_MAX; - if (size > avail/8) { - size = (avail+1)/8; + if (size > (avail - 1) / 8) { + size = avail / 8; /* round size down to next power of 2 */ i = 0; while ((size /= 2) != 0) From 11f997143c67680d6e40a13363618380cd57a414 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 18 Jun 2024 16:21:20 -0600 Subject: [PATCH 139/374] drm/amd/display: Check denominator pbn_div before used [ Upstream commit 116a678f3a9abc24f5c9d2525b7393d18d9eb58e ] [WHAT & HOW] A denominator cannot be 0, and is checked before used. This fixes 1 DIVIDE_BY_ZERO issue reported by Coverity. 
Reviewed-by: Harry Wentland Signed-off-by: Jerry Zuo Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0627961b7115..27e641f17628 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7302,7 +7302,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } - if (j == dc_state->stream_count) + if (j == dc_state->stream_count || pbn_div == 0) continue; slot_num = DIV_ROUND_UP(pbn, pbn_div); From ede06d23392529b039cf7ac11b5875b047900f1c Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 18 Jun 2024 16:19:48 -0600 Subject: [PATCH 140/374] drm/amd/display: Check denominator crb_pipes before used [ Upstream commit ea79068d4073bf303f8203f2625af7d9185a1bc6 ] [WHAT & HOW] A denominator cannot be 0, and is checked before used. This fixes 2 DIVIDE_BY_ZERO issues reported by Coverity. Reviewed-by: Harry Wentland Signed-off-by: Jerry Zuo Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- .../gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 4ce0f4bf1d9b..3329eaecfb15 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1756,7 +1756,7 @@ static int dcn315_populate_dml_pipes_from_context( bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - if (remaining_det_segs > MIN_RESERVED_DET_SEGS) + if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { From 9073d2cb8fdb5a4ca7cad597e30a4b5084445f71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Sat, 1 Jun 2024 16:36:27 -0400 Subject: [PATCH 141/374] drm/amdgpu: check for LINEAR_ALIGNED correctly in check_tiling_flags_gfx6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 11317d2963fa79767cd7c6231a00a9d77f2e0f54 ] Fix incorrect check. 
Signed-off-by: Marek Olšák Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 3ecc7ef95172..4fcc227db00b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -917,8 +917,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb) { u64 micro_tile_mode; - /* Zero swizzle mode means linear */ - if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0) + if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */ return 0; micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE); From 14c9b54b28e0d24a62acc7ddbfcfddf85b110d96 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Jun 2024 16:47:36 +0800 Subject: [PATCH 142/374] drm/amdgpu: Correct register used to clear fault status [ Upstream commit c2fad7317441be573175c4d98b28347ddec7fe77 ] Driver should write to fault_cntl registers to do one-shot address/status clear. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 8d7267a013d2..621761a17ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -569,7 +569,7 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev)) { /* clear page fault status and address */ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, - regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1); } return fed; From 79cc2b84ad24cf30095355c4ee3d71af01da0cd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Sat, 1 Jun 2024 19:59:34 -0400 Subject: [PATCH 143/374] drm/amdgpu/display: handle gfx12 in amdgpu_dm_plane_format_mod_supported MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ed17b63e7e25f03b40db66a8d5802b89aac40441 ] All this code has undefined behavior on GFX12 and shouldn't be executed. Signed-off-by: Marek Olšák Acked-by: Alex Deucher Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- .../amd/display/amdgpu_dm/amdgpu_dm_plane.c | 47 ++++++++++--------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 70e45d980bb9..7d47acdd11d5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1400,8 +1400,6 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, const struct drm_format_info *info = drm_format_info(format); int i; - enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; - if (!info) return false; @@ -1423,29 +1421,34 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, if (i == plane->modifier_count) return false; - /* - * For D swizzle the canonical modifier depends on the bpp, so check - * it here. 
- */ - if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && - adev->family >= AMDGPU_FAMILY_NV) { - if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) - return false; - } - - if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && - info->cpp[0] < 8) - return false; + /* GFX12 doesn't have these limitations. */ + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) { + enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; - if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { - /* Per radeonsi comments 16/64 bpp are more complicated. */ - if (info->cpp[0] != 4) - return false; - /* We support multi-planar formats, but not when combined with - * additional DCC metadata planes. + /* + * For D swizzle the canonical modifier depends on the bpp, so check + * it here. */ - if (info->num_planes > 1) + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && + adev->family >= AMDGPU_FAMILY_NV) { + if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) + return false; + } + + if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && + info->cpp[0] < 8) return false; + + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { + /* Per radeonsi comments 16/64 bpp are more complicated. */ + if (info->cpp[0] != 4) + return false; + /* We support multi-planar formats, but not when combined with + * additional DCC metadata planes. + */ + if (info->num_planes > 1) + return false; + } } return true; From abb0a615569ec008e8a93d9f3ab2d5b418ea94d4 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 22 Jul 2024 12:28:42 -0700 Subject: [PATCH 144/374] can: bcm: Remove proc entry when dev is unregistered. [ Upstream commit 76fe372ccb81b0c89b6cd2fec26e2f38c958be85 ] syzkaller reported a warning in bcm_connect() below. [0] The repro calls connect() to vxcan1, removes vxcan1, and calls connect() with ifindex == 0. Calling connect() for a BCM socket allocates a proc entry. Then, bcm_sk(sk)->bound is set to 1 to prevent further connect(). However, removing the bound device resets bcm_sk(sk)->bound to 0 in bcm_notify(). The 2nd connect() tries to allocate a proc entry with the same name and sets NULL to bcm_sk(sk)->bcm_proc_read, leaking the original proc entry. Since the proc entry is available only for connect()ed sockets, let's clean up the entry when the bound netdev is unregistered. 
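Spelled out as code, the repro sequence described above is roughly the following (editorial sketch matching the description, not the literal syzkaller program; the vxcan1 interface and its out-of-band deletion via ip link are assumptions):

/* Editorial sketch of the leak described above: bind a BCM socket to an
 * interface, let the interface disappear, then connect again.
 */
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/can.h>

int main(void)
{
	struct sockaddr_can addr;
	int s = socket(AF_CAN, SOCK_DGRAM, CAN_BCM);

	if (s < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("vxcan1");	/* assumes vxcan1 exists */
	/* First connect: registers the per-socket entry under /proc/net/can-bcm/
	 * and sets bcm_sk(sk)->bound = 1. */
	connect(s, (struct sockaddr *)&addr, sizeof(addr));

	/* "ip link del vxcan1" is run elsewhere: bcm_notify() resets bound to 0
	 * but, before this fix, left the proc entry registered. */

	addr.can_ifindex = 0;
	/* Second connect: tries to register the same proc name again and leaks
	 * the original entry; the fix removes the entry on unregister instead. */
	connect(s, (struct sockaddr *)&addr, sizeof(addr));

	close(s);
	return 0;
}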
[0]: proc_dir_entry 'can-bcm/2456' already registered WARNING: CPU: 1 PID: 394 at fs/proc/generic.c:376 proc_register+0x645/0x8f0 fs/proc/generic.c:375 Modules linked in: CPU: 1 PID: 394 Comm: syz-executor403 Not tainted 6.10.0-rc7-g852e42cc2dd4 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014 RIP: 0010:proc_register+0x645/0x8f0 fs/proc/generic.c:375 Code: 00 00 00 00 00 48 85 ed 0f 85 97 02 00 00 4d 85 f6 0f 85 9f 02 00 00 48 c7 c7 9b cb cf 87 48 89 de 4c 89 fa e8 1c 6f eb fe 90 <0f> 0b 90 90 48 c7 c7 98 37 99 89 e8 cb 7e 22 05 bb 00 00 00 10 48 RSP: 0018:ffa0000000cd7c30 EFLAGS: 00010246 RAX: 9e129be1950f0200 RBX: ff1100011b51582c RCX: ff1100011857cd80 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000002 RBP: 0000000000000000 R08: ffd400000000000f R09: ff1100013e78cac0 R10: ffac800000cd7980 R11: ff1100013e12b1f0 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: ff1100011a99a2ec FS: 00007fbd7086f740(0000) GS:ff1100013fd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000200071c0 CR3: 0000000118556004 CR4: 0000000000771ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: proc_create_net_single+0x144/0x210 fs/proc/proc_net.c:220 bcm_connect+0x472/0x840 net/can/bcm.c:1673 __sys_connect_file net/socket.c:2049 [inline] __sys_connect+0x5d2/0x690 net/socket.c:2066 __do_sys_connect net/socket.c:2076 [inline] __se_sys_connect net/socket.c:2073 [inline] __x64_sys_connect+0x8f/0x100 net/socket.c:2073 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xd9/0x1c0 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x4b/0x53 RIP: 0033:0x7fbd708b0e5d Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48 RSP: 002b:00007fff8cd33f08 EFLAGS: 00000246 ORIG_RAX: 000000000000002a RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007fbd708b0e5d RDX: 0000000000000010 RSI: 0000000020000040 RDI: 0000000000000003 RBP: 0000000000000000 R08: 0000000000000040 R09: 0000000000000040 R10: 0000000000000040 R11: 0000000000000246 R12: 00007fff8cd34098 R13: 0000000000401280 R14: 0000000000406de8 R15: 00007fbd70ab9000 remove_proc_entry: removing non-empty directory 'net/can-bcm', leaking at least '2456' Fixes: ffd980f976e7 ("[CAN]: Add broadcast manager (bcm) protocol") Reported-by: syzkaller Signed-off-by: Kuniyuki Iwashima Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240722192842.37421-1-kuniyu@amazon.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- net/can/bcm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/can/bcm.c b/net/can/bcm.c index 27d5fcf0eac9..46d3ec3aa44b 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1470,6 +1470,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { +#if IS_ENABLED(CONFIG_PROC_FS) + if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) + remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir); +#endif bo->bound = 0; bo->ifindex = 0; notify_enodev = 1; From abf296d242cde80f003dddab044e987bc141a73a Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 5 Aug 2024 15:01:58 +0100 Subject: [PATCH 145/374] 
can: m_can: Release irq on error in m_can_open [ Upstream commit 06d4ef3056a7ac31be331281bb7a6302ef5a7f8a ] It appears that the irq requested in m_can_open() may be leaked if an error subsequently occurs: if m_can_start() fails. Address this by calling free_irq in the unwind path for such cases. Flagged by Smatch. Compile tested only. Fixes: eaacfeaca7ad ("can: m_can: Call the RAM init directly from m_can_chip_config") Acked-by: Marc Kleine-Budde Signed-off-by: Simon Horman Link: https://lore.kernel.org/all/20240805-mcan-irq-v2-1-7154c0484819@kernel.org Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 14b231c4d7ec..205a6cb4470f 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -2009,7 +2009,7 @@ static int m_can_open(struct net_device *dev) /* start the m_can controller */ err = m_can_start(dev); if (err) - goto exit_irq_fail; + goto exit_start_fail; if (!cdev->is_peripheral) napi_enable(&cdev->napi); @@ -2018,6 +2018,9 @@ static int m_can_open(struct net_device *dev) return 0; +exit_start_fail: + if (cdev->is_peripheral || dev->irq) + free_irq(dev->irq, dev); exit_irq_fail: if (cdev->is_peripheral) destroy_workqueue(cdev->tx_wq); From 7eab2b893d5e517231956ebb83ad90f338e7a269 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:41 +0200 Subject: [PATCH 146/374] can: m_can: Reset coalescing during suspend/resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a651261ac74298535f6d6316ebe27beceb6b17b1 ] During resume the interrupts are limited to IR_RF0N and the chip keeps running. In this case if coalescing is enabled and active we may miss waterlevel interrupts during suspend. It is safer to reset the coalescing by stopping the timer and adding IR_RF0N | IR_TEFN to the interrupts. This is a theoratical issue and probably extremely rare. Cc: Martin Hundebøll Fixes: 4a94d7e31cf5 ("can: m_can: allow keeping the transceiver running in suspend") Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-2-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 205a6cb4470f..257d5bc0ae9e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -2387,12 +2387,15 @@ int m_can_class_suspend(struct device *dev) netif_device_detach(ndev); /* leave the chip running with rx interrupt enabled if it is - * used as a wake-up source. + * used as a wake-up source. Coalescing needs to be reset then, + * the timer is cancelled here, interrupts are done in resume. */ - if (cdev->pm_wake_source) + if (cdev->pm_wake_source) { + hrtimer_cancel(&cdev->hrtimer); m_can_write(cdev, M_CAN_IE, IR_RF0N); - else + } else { m_can_stop(ndev); + } m_can_clk_stop(cdev); } @@ -2422,6 +2425,13 @@ int m_can_class_resume(struct device *dev) return ret; if (cdev->pm_wake_source) { + /* Restore active interrupts but disable coalescing as + * we may have missed important waterlevel interrupts + * between suspend and resume. Timers are already + * stopped in suspend. Here we enable all interrupts + * again. 
+ */ + cdev->active_interrupts |= IR_RF0N | IR_TEFN; m_can_write(cdev, M_CAN_IE, cdev->active_interrupts); } else { ret = m_can_start(ndev); From 79ae3475c9cfead96baff7971c768e03a47010dd Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:42 +0200 Subject: [PATCH 147/374] can: m_can: Remove coalesing disable in isr during suspend [ Upstream commit 6eff1cead75ff330bb33264424c1da6cc7179ab8 ] We don't need to disable coalescing when the interrupt handler executes while the chip is suspended. The coalescing is already reset during suspend. Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing") Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-3-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 257d5bc0ae9e..dba1788f7fbb 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1189,10 +1189,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) struct m_can_classdev *cdev = netdev_priv(dev); u32 ir; - if (pm_runtime_suspended(cdev->dev)) { - m_can_coalescing_disable(cdev); + if (pm_runtime_suspended(cdev->dev)) return IRQ_NONE; - } ir = m_can_read(cdev, M_CAN_IR); m_can_coalescing_update(cdev, ir); From 83e5c41d1ecd7cf36b75014471e5526f39496e09 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:43 +0200 Subject: [PATCH 148/374] can: m_can: Remove m_can_rx_peripheral indirection [ Upstream commit 40e4552eeef0e3090a5988de15889795936fd38f ] m_can_rx_peripheral() is a wrapper around m_can_rx_handler() that calls m_can_disable_all_interrupts() on error. The same handling for the same error path is done in m_can_isr() as well. So remove m_can_rx_peripheral() and do the call from m_can_isr() directly. Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-4-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Stable-dep-of: 4d5159bfafa8 ("can: m_can: Do not cancel timer from within timer") Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index dba1788f7fbb..2d73fa7f8258 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1003,22 +1003,6 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus) return work_done; } -static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus) -{ - struct m_can_classdev *cdev = netdev_priv(dev); - int work_done; - - work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus); - - /* Don't re-enable interrupts if the driver had a fatal error - * (e.g., FIFO read failure). 
- */ - if (work_done < 0) - m_can_disable_all_interrupts(cdev); - - return work_done; -} - static int m_can_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; @@ -1216,7 +1200,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) } else { int pkts; - pkts = m_can_rx_peripheral(dev, ir); + pkts = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir); if (pkts < 0) goto out_fail; } From 281f32ea887ede780dc0a6805edadf60ad0fbd2b Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:44 +0200 Subject: [PATCH 149/374] can: m_can: Do not cancel timer from within timer [ Upstream commit 4d5159bfafa8d1a205d8213b7434e0402588b9ed ] On setups without interrupts, the interrupt handler is called from a timer callback. For non-peripheral receives napi is scheduled, interrupts are disabled and the timer is canceled with a blocking call. In case of an error this can happen as well. Check if napi is scheduled in the timer callback after the interrupt handler executed. If napi is scheduled, the timer is disabled. It will be reenabled by m_can_poll(). Return error values from the interrupt handler so that interrupt threads and timer callback can deal differently with it. In case of the timer we only disable the timer. The rest will be done when stopping the interface. Fixes: b382380c0d2d ("can: m_can: Add hrtimer to generate software interrupt") Fixes: a163c5761019 ("can: m_can: Start/Cancel polling timer together with interrupts") Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-5-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 57 ++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2d73fa7f8258..d15655df6393 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -453,7 +453,7 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) if (!cdev->net->irq) { dev_dbg(cdev->dev, "Stop hrtimer\n"); - hrtimer_cancel(&cdev->hrtimer); + hrtimer_try_to_cancel(&cdev->hrtimer); } } @@ -1167,11 +1167,15 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir) HRTIMER_MODE_REL); } -static irqreturn_t m_can_isr(int irq, void *dev_id) +/* This interrupt handler is called either from the interrupt thread or a + * hrtimer. This has implications like cancelling a timer won't be possible + * blocking. 
+ */ +static int m_can_interrupt_handler(struct m_can_classdev *cdev) { - struct net_device *dev = (struct net_device *)dev_id; - struct m_can_classdev *cdev = netdev_priv(dev); + struct net_device *dev = cdev->net; u32 ir; + int ret; if (pm_runtime_suspended(cdev->dev)) return IRQ_NONE; @@ -1198,11 +1202,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) m_can_disable_all_interrupts(cdev); napi_schedule(&cdev->napi); } else { - int pkts; - - pkts = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir); - if (pkts < 0) - goto out_fail; + ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir); + if (ret < 0) + return ret; } } @@ -1220,8 +1222,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) } else { if (ir & (IR_TEFN | IR_TEFW)) { /* New TX FIFO Element arrived */ - if (m_can_echo_tx_event(dev) != 0) - goto out_fail; + ret = m_can_echo_tx_event(dev); + if (ret != 0) + return ret; } } @@ -1229,16 +1232,31 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) can_rx_offload_threaded_irq_finish(&cdev->offload); return IRQ_HANDLED; +} -out_fail: - m_can_disable_all_interrupts(cdev); - return IRQ_HANDLED; +static irqreturn_t m_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct m_can_classdev *cdev = netdev_priv(dev); + int ret; + + ret = m_can_interrupt_handler(cdev); + if (ret < 0) { + m_can_disable_all_interrupts(cdev); + return IRQ_HANDLED; + } + + return ret; } static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer) { struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer); + if (cdev->can.state == CAN_STATE_BUS_OFF || + cdev->can.state == CAN_STATE_STOPPED) + return HRTIMER_NORESTART; + irq_wake_thread(cdev->net->irq, cdev->net); return HRTIMER_NORESTART; @@ -1930,8 +1948,17 @@ static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) { struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer); + int ret; + + if (cdev->can.state == CAN_STATE_BUS_OFF || + cdev->can.state == CAN_STATE_STOPPED) + return HRTIMER_NORESTART; + + ret = m_can_interrupt_handler(cdev); - m_can_isr(0, cdev->net); + /* On error or if napi is scheduled to read, stop the timer */ + if (ret < 0 || napi_is_scheduled(&cdev->napi)) + return HRTIMER_NORESTART; hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS)); From 3fe629aa7d470d8400a9022bb1ffbd5cb6d6a270 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:45 +0200 Subject: [PATCH 150/374] can: m_can: disable_all_interrupts, not clear active_interrupts [ Upstream commit a572fea86c9b06cd3e6e89d79d565b52cb7e7cff ] active_interrupts is a cache for the enabled interrupts and not the global masking of interrupts. Do not clear this variable otherwise we may loose the state of the interrupts. 
Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing") Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-6-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index d15655df6393..073842ab210d 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -449,7 +449,6 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) { m_can_coalescing_disable(cdev); m_can_write(cdev, M_CAN_ILE, 0x0); - cdev->active_interrupts = 0x0; if (!cdev->net->irq) { dev_dbg(cdev->dev, "Stop hrtimer\n"); From 9b6513daa5e20143e666c40318491e1e70f3d5cc Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Mon, 5 Aug 2024 20:30:46 +0200 Subject: [PATCH 151/374] can: m_can: Reset cached active_interrupts on start [ Upstream commit 733dbf556cd5b71d5e6f6aa7a93f117b438ab785 ] To force writing the enabled interrupts, reset the active_interrupts cache. Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing") Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240805183047.305630-7-msp@baylibre.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/m_can/m_can.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 073842ab210d..e4f0a382c216 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1505,6 +1505,7 @@ static int m_can_chip_config(struct net_device *dev) else interrupts &= ~(IR_ERR_LEC_31X); } + cdev->active_interrupts = 0; m_can_interrupt_enable(cdev, interrupts); /* route all interrupts to INT0 */ From c5a5feb1f84d8a9f5dcfe5adcd0ca0eca44e96d7 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Fri, 5 Jul 2024 17:28:27 +0200 Subject: [PATCH 152/374] can: mcp251xfd: fix ring configuration when switching from CAN-CC to CAN-FD mode [ Upstream commit 50ea5449c56310d2d31c28ba91a59232116d3c1e ] If the ring (rx, tx) and/or coalescing parameters (rx-frames-irq, tx-frames-irq) have been configured while the interface was in CAN-CC mode, but the interface is brought up in CAN-FD mode, the ring parameters might be too big. Use the default CAN-FD values in this case. Fixes: 9263c2e92be9 ("can: mcp251xfd: ring: add support for runtime configurable RX/TX ring parameters") Link: https://lore.kernel.org/all/20240805-mcp251xfd-fix-ringconfig-v1-1-72086f0ca5ee@pengutronix.de Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c | 11 +++++++++- .../net/can/spi/mcp251xfd/mcp251xfd-ring.c | 20 ++++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c index 9e8e82cdba46..61b0d6fa52dd 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c @@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout, if (ring) { u8 num_rx_coalesce = 0, num_tx_coalesce = 0; - num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending); + /* If the ring parameters have been configured in + * CAN-CC mode, but and we are in CAN-FD mode now, + * they might be to big. Use the default CAN-FD values + * in this case. 
+ */ + num_rx = ring->rx_pending; + if (num_rx > layout->max_rx) + num_rx = layout->default_rx; + + num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); /* The ethtool doc says: * To disable coalescing, set usecs = 0 and max_frames = 1. diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index 4cb79a4f2461..3a941a71c78f 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -468,11 +468,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) /* switching from CAN-2.0 to CAN-FD mode or vice versa */ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) { + const struct ethtool_ringparam ring = { + .rx_pending = priv->rx_obj_num, + .tx_pending = priv->tx->obj_num, + }; + const struct ethtool_coalesce ec = { + .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq, + .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq, + .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq, + .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq, + }; struct can_ram_layout layout; - can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode); - priv->rx_obj_num = layout.default_rx; - tx_ring->obj_num = layout.default_tx; + can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode); + + priv->rx_obj_num = layout.cur_rx; + priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; + + tx_ring->obj_num = layout.cur_tx; + priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; } if (fd_mode) { From 9cf20320a013efaea501159b09cb2ab882ae8949 Mon Sep 17 00:00:00 2001 From: Andreas Hindborg Date: Thu, 15 Aug 2024 07:49:30 +0000 Subject: [PATCH 153/374] rust: kbuild: fix export of bss symbols [ Upstream commit b8673d56935c32a4e0a1a0b40951fdd313dbf340 ] Symbols in the bss segment are not currently exported. This is a problem for Rust modules that link against statics, that are resident in the kernel image. Thus export symbols in the bss segment. Fixes: 2f7ab1267dc9 ("Kbuild: add Rust support") Signed-off-by: Andreas Hindborg Reviewed-by: Alice Ryhl Tested-by: Alice Ryhl Reviewed-by: Gary Guo Link: https://lore.kernel.org/r/20240815074519.2684107-2-nmi@metaspace.dk [ Reworded slightly. - Miguel ] Signed-off-by: Miguel Ojeda Signed-off-by: Sasha Levin --- rust/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/Makefile b/rust/Makefile index f70d5e244fee..47f9a9f1bdb3 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -359,7 +359,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE quiet_cmd_exports = EXPORTS $@ cmd_exports = \ $(NM) -p --defined-only $< \ - | awk '/ (T|R|D) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@ + | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE $(call if_changed,exports) From 2e1c24f7f5e52b8b4145a3d1194885ab7ba77663 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 22 Aug 2024 23:06:48 +0100 Subject: [PATCH 154/374] cifs: Fix lack of credit renegotiation on read retry [ Upstream commit 6a5dcd487791e0c2d86622064602a5c7459941ed ] When netfslib asks cifs to issue a read operation, it prefaces this with a call to ->clamp_length() which cifs uses to negotiate credits, providing receive capacity on the server; however, in the event that a read op needs reissuing, netfslib doesn't call ->clamp_length() again as that could shorten the subrequest, leaving a gap. 
This causes the retried read to be done with zero credits which causes the server to reject it with STATUS_INVALID_PARAMETER. This is a problem for a DIO read that is requested that would go over the EOF. The short read will be retried, causing EINVAL to be returned to the user when it fails. Fix this by making cifs_req_issue_read() negotiate new credits if retrying (NETFS_SREQ_RETRYING now gets set in the read side as well as the write side in this instance). This isn't sufficient, however: the new credits might not be sufficient to complete the remainder of the read, so also add an additional field, rreq->actual_len, that holds the actual size of the op we want to perform without having to alter subreq->len. We then rely on repeated short reads being retried until we finish the read or reach the end of file and make a zero-length read. Also fix a couple of places where the subrequest start and length need to be altered by the amount so far transferred when being used. Fixes: 69c3c023af25 ("cifs: Implement netfslib hooks") Signed-off-by: David Howells cc: Steve French cc: Paulo Alcantara cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/netfs/io.c | 2 ++ fs/smb/client/cifsglob.h | 1 + fs/smb/client/file.c | 37 +++++++++++++++++++++++++++++++++---- fs/smb/client/smb2ops.c | 2 +- fs/smb/client/smb2pdu.c | 28 +++++++++++++++++----------- fs/smb/client/trace.h | 1 + 6 files changed, 55 insertions(+), 16 deletions(-) diff --git a/fs/netfs/io.c b/fs/netfs/io.c index c96431d3da6d..2a5c22606fb1 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -306,6 +306,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) break; subreq->source = NETFS_DOWNLOAD_FROM_SERVER; subreq->error = 0; + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); netfs_stat(&netfs_n_rh_download_instead); trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); @@ -313,6 +314,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) netfs_reset_subreq_iter(rreq, subreq); netfs_read_from_server(rreq, subreq); } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) { + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); netfs_reset_subreq_iter(rreq, subreq); netfs_rreq_short_read(rreq, subreq); } diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 1e4da268de3b..552792f28122 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1508,6 +1508,7 @@ struct cifs_io_subrequest { struct cifs_io_request *req; }; ssize_t got_bytes; + size_t actual_len; unsigned int xid; int result; bool have_xid; diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index b202eac6584e..533f76118316 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -111,6 +111,7 @@ static void cifs_issue_write(struct netfs_io_subrequest *subreq) goto fail; } + wdata->actual_len = wdata->subreq.len; rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust); if (rc) goto fail; @@ -153,7 +154,7 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq) struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); struct TCP_Server_Info *server = req->server; struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); - size_t rsize = 0; + size_t rsize; int rc; rdata->xid = get_xid(); @@ -166,8 +167,8 @@ static bool cifs_clamp_length(struct 
netfs_io_subrequest *subreq) cifs_sb->ctx); - rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, - &rdata->credits); + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, + &rsize, &rdata->credits); if (rc) { subreq->error = rc; return false; @@ -183,7 +184,8 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq) server->credits, server->in_flight, 0, cifs_trace_rw_credits_read_submit); - subreq->len = min_t(size_t, subreq->len, rsize); + subreq->len = umin(subreq->len, rsize); + rdata->actual_len = subreq->len; #ifdef CONFIG_CIFS_SMB_DIRECT if (server->smbd_conn) @@ -203,12 +205,39 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq) struct netfs_io_request *rreq = subreq->rreq; struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); + struct TCP_Server_Info *server = req->server; + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); int rc = 0; cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n", __func__, rreq->debug_id, subreq->debug_index, rreq->mapping, subreq->transferred, subreq->len); + if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) { + /* + * As we're issuing a retry, we need to negotiate some new + * credits otherwise the server may reject the op with + * INVALID_PARAMETER. Note, however, we may get back less + * credit than we need to complete the op, in which case, we + * shorten the op and rely on additional rounds of retry. + */ + size_t rsize = umin(subreq->len - subreq->transferred, + cifs_sb->ctx->rsize); + + rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len, + &rdata->credits); + if (rc) + goto out; + + rdata->credits.in_flight_check = 1; + + trace_smb3_rw_credits(rdata->rreq->debug_id, + rdata->subreq.debug_index, + rdata->credits.value, + server->credits, server->in_flight, 0, + cifs_trace_rw_credits_read_resubmit); + } + if (req->cfile->invalidHandle) { do { rc = cifs_reopen_file(req->cfile, true); diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index f44f5f249400..42352f70b01c 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -301,7 +301,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server, unsigned int /*enum smb3_rw_credits_trace*/ trace) { struct cifs_credits *credits = &subreq->credits; - int new_val = DIV_ROUND_UP(subreq->subreq.len, SMB2_MAX_BUFFER_SIZE); + int new_val = DIV_ROUND_UP(subreq->actual_len, SMB2_MAX_BUFFER_SIZE); int scredits, in_flight; if (!credits->value || credits->value == new_val) diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index d262e70100c9..5f5f51bf9850 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -4523,9 +4523,9 @@ smb2_readv_callback(struct mid_q_entry *mid) "rdata server %p != mid server %p", rdata->server, mid->server); - cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n", + cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n", __func__, mid->mid, mid->mid_state, rdata->result, - rdata->subreq.len); + rdata->actual_len, rdata->subreq.len - rdata->subreq.transferred); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: @@ -4579,15 +4579,18 @@ smb2_readv_callback(struct mid_q_entry *mid) rdata->subreq.debug_index, rdata->xid, rdata->req->cfile->fid.persistent_fid, - tcon->tid, tcon->ses->Suid, rdata->subreq.start, - rdata->subreq.len, rdata->result); + tcon->tid, tcon->ses->Suid, + rdata->subreq.start + 
rdata->subreq.transferred, + rdata->actual_len, + rdata->result); } else trace_smb3_read_done(rdata->rreq->debug_id, rdata->subreq.debug_index, rdata->xid, rdata->req->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, - rdata->subreq.start, rdata->got_bytes); + rdata->subreq.start + rdata->subreq.transferred, + rdata->got_bytes); if (rdata->result == -ENODATA) { /* We may have got an EOF error because fallocate @@ -4615,6 +4618,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata) { int rc, flags = 0; char *buf; + struct netfs_io_subrequest *subreq = &rdata->subreq; struct smb2_hdr *shdr; struct cifs_io_parms io_parms; struct smb_rqst rqst = { .rq_iov = rdata->iov, @@ -4625,15 +4629,15 @@ smb2_async_readv(struct cifs_io_subrequest *rdata) int credit_request; cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n", - __func__, rdata->subreq.start, rdata->subreq.len); + __func__, subreq->start, subreq->len); if (!rdata->server) rdata->server = cifs_pick_channel(tcon->ses); io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink); io_parms.server = server = rdata->server; - io_parms.offset = rdata->subreq.start; - io_parms.length = rdata->subreq.len; + io_parms.offset = subreq->start + subreq->transferred; + io_parms.length = rdata->actual_len; io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; io_parms.pid = rdata->req->pid; @@ -4648,11 +4652,13 @@ smb2_async_readv(struct cifs_io_subrequest *rdata) rdata->iov[0].iov_base = buf; rdata->iov[0].iov_len = total_len; + rdata->got_bytes = 0; + rdata->result = 0; shdr = (struct smb2_hdr *)buf; if (rdata->credits.value > 0) { - shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len, + shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->actual_len, SMB2_MAX_BUFFER_SIZE)); credit_request = le16_to_cpu(shdr->CreditCharge) + 8; if (server->credits >= server->max_credits) @@ -4676,11 +4682,11 @@ smb2_async_readv(struct cifs_io_subrequest *rdata) if (rc) { cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); trace_smb3_read_err(rdata->rreq->debug_id, - rdata->subreq.debug_index, + subreq->debug_index, rdata->xid, io_parms.persistent_fid, io_parms.tcon->tid, io_parms.tcon->ses->Suid, - io_parms.offset, io_parms.length, rc); + io_parms.offset, rdata->actual_len, rc); } async_readv_out: diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h index 36d5295c2a6f..13adfe550b99 100644 --- a/fs/smb/client/trace.h +++ b/fs/smb/client/trace.h @@ -30,6 +30,7 @@ EM(cifs_trace_rw_credits_old_session, "old-session") \ EM(cifs_trace_rw_credits_read_response_add, "rd-resp-add") \ EM(cifs_trace_rw_credits_read_response_clear, "rd-resp-clr") \ + EM(cifs_trace_rw_credits_read_resubmit, "rd-resubmit") \ EM(cifs_trace_rw_credits_read_submit, "rd-submit ") \ EM(cifs_trace_rw_credits_write_prepare, "wr-prepare ") \ EM(cifs_trace_rw_credits_write_response_add, "wr-resp-add") \ From f3d8e8fde4545db6fda45eec0d42ed002289548e Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 22 Aug 2024 23:06:49 +0100 Subject: [PATCH 155/374] netfs, cifs: Fix handling of short DIO read [ Upstream commit 1da29f2c39b67b846b74205c81bf0ccd96d34727 ] Short DIO reads, particularly in relation to cifs, are not being handled correctly by cifs and netfslib. This can be tested by doing a DIO read of a file where the size of read is larger than the size of the file. When it crosses the EOF, it gets a short read and this gets retried, and in the case of cifs, the retry read fails, with the failure being translated to ENODATA. 
Fix this by the following means: (1) Add a flag, NETFS_SREQ_HIT_EOF, for the filesystem to set when it detects that the read did hit the EOF. (2) Make the netfslib read assessment stop processing subrequests when it encounters one with that flag set. (3) Return rreq->transferred, the accumulated contiguous amount read to that point, to userspace for a DIO read. (4) Make cifs set the flag and clear the error if the read RPC returned ENODATA. (5) Make cifs set the flag and clear the error if a short read occurred without error and the read-to file position is now at the remote inode size. Fixes: 69c3c023af25 ("cifs: Implement netfslib hooks") Signed-off-by: David Howells cc: Steve French cc: Paulo Alcantara cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/netfs/io.c | 17 +++++++++++------ fs/smb/client/smb2pdu.c | 13 +++++++++---- include/linux/netfs.h | 1 + 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/fs/netfs/io.c b/fs/netfs/io.c index 2a5c22606fb1..c91e7b12bbf1 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -368,7 +368,8 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq) if (subreq->error || subreq->transferred == 0) break; transferred += subreq->transferred; - if (subreq->transferred < subreq->len) + if (subreq->transferred < subreq->len || + test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) break; } @@ -503,7 +504,8 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, subreq->error = 0; subreq->transferred += transferred_or_error; - if (subreq->transferred < subreq->len) + if (subreq->transferred < subreq->len && + !test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) goto incomplete; complete: @@ -777,10 +779,13 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync) TASK_UNINTERRUPTIBLE); ret = rreq->error; - if (ret == 0 && rreq->submitted < rreq->len && - rreq->origin != NETFS_DIO_READ) { - trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); - ret = -EIO; + if (ret == 0) { + if (rreq->origin == NETFS_DIO_READ) { + ret = rreq->transferred; + } else if (rreq->submitted < rreq->len) { + trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); + ret = -EIO; + } } } else { /* If we decrement nr_outstanding to 0, the ref belongs to us. */ diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 5f5f51bf9850..8e02e9f45e0e 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -4501,6 +4501,7 @@ static void smb2_readv_callback(struct mid_q_entry *mid) { struct cifs_io_subrequest *rdata = mid->callback_data; + struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode); struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); struct TCP_Server_Info *server = rdata->server; struct smb2_hdr *shdr = @@ -4593,11 +4594,15 @@ smb2_readv_callback(struct mid_q_entry *mid) rdata->got_bytes); if (rdata->result == -ENODATA) { - /* We may have got an EOF error because fallocate - * failed to enlarge the file. 
- */ - if (rdata->subreq.start < rdata->subreq.rreq->i_size) + __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); + rdata->result = 0; + } else { + if (rdata->got_bytes < rdata->actual_len && + rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes == + ictx->remote_i_size) { + __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); rdata->result = 0; + } } trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, server->credits, server->in_flight, diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 5d0288938cc2..d8892b1a2dd7 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -200,6 +200,7 @@ struct netfs_io_subrequest { #define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ #define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ #define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ +#define NETFS_SREQ_HIT_EOF 12 /* Set if we hit the EOF */ }; enum netfs_io_origin { From 55e78331e0caf1b09c797ca917ee1af2c075c10a Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Aug 2024 15:47:27 +0100 Subject: [PATCH 156/374] cifs: Fix copy offload to flush destination region [ Upstream commit 8101d6e112e2524e967368f920c404ae445a9757 ] Fix cifs_file_copychunk_range() to flush the destination region before invalidating it to avoid potential loss of data should the copy fail, in whole or in part, in some way. Fixes: 7b2404a886f8 ("cifs: Fix flushing, invalidation and file size with copy_file_range()") Signed-off-by: David Howells cc: Steve French cc: Paulo Alcantara cc: Shyam Prasad N cc: Rohith Surabattula cc: Matthew Wilcox cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: linux-mm@kvack.org cc: linux-fsdevel@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/cifsfs.c | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 2c4b357d85e2..a1acf5bd1e3a 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1341,7 +1341,6 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, struct cifsFileInfo *smb_file_target; struct cifs_tcon *src_tcon; struct cifs_tcon *target_tcon; - unsigned long long destend, fstart, fend; ssize_t rc; cifs_dbg(FYI, "copychunk range\n"); @@ -1391,25 +1390,13 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, goto unlock; } - destend = destoff + len - 1; - - /* Flush the folios at either end of the destination range to prevent - * accidental loss of dirty data outside of the range. + /* Flush and invalidate all the folios in the destination region. If + * the copy was successful, then some of the flush is extra overhead, + * but we need to allow for the copy failing in some way (eg. ENOSPC). */ - fstart = destoff; - fend = destend; - - rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true); + rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1); if (rc) goto unlock; - rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false); - if (rc) - goto unlock; - if (fend > target_cifsi->netfs.zero_point) - target_cifsi->netfs.zero_point = fend + 1; - - /* Discard all the folios that overlap the destination region. 
*/ - truncate_inode_pages_range(&target_inode->i_data, fstart, fend); fscache_invalidate(cifs_inode_cookie(target_inode), NULL, i_size_read(target_inode), 0); From 7dfa4279c55f467a88e653a5d544817181b22aea Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 28 Aug 2024 21:08:25 +0100 Subject: [PATCH 157/374] cifs: Fix FALLOC_FL_ZERO_RANGE to preflush buffered part of target region [ Upstream commit 91d1dfae464987aaf6c79ff51d8674880fb3be77 ] Under certain conditions, the range to be cleared by FALLOC_FL_ZERO_RANGE may only be buffered locally and not yet have been flushed to the server. For example: xfs_io -f -t -c "pwrite -S 0x41 0 4k" \ -c "pwrite -S 0x42 4k 4k" \ -c "fzero 0 4k" \ -c "pread -v 0 8k" /xfstest.test/foo will write two 4KiB blocks of data, which get buffered in the pagecache, and then fallocate() is used to clear the first 4KiB block on the server - but we don't flush the data first, which means the EOF position on the server is wrong, and so the FSCTL_SET_ZERO_DATA RPC fails (and xfs_io ignores the error), but then when we try to read it, we see the old data. Fix this by preflushing any part of the target region that above the server's idea of the EOF position to force the server to update its EOF position. Note, however, that we don't want to simply expand the file by moving the EOF before doing the FSCTL_SET_ZERO_DATA[*] because someone else might see the zeroed region or if the RPC fails we then have to try to clean it up or risk getting corruption. [*] And we have to move the EOF first otherwise FSCTL_SET_ZERO_DATA won't do what we want. This fixes the generic/008 xfstest. [!] Note: A better way to do this might be to split the operation into two parts: we only do FSCTL_SET_ZERO_DATA for the part of the range below the server's EOF and then, if that worked, invalidate the buffered pages for the part above the range. 
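To make the new EOF check easier to follow, here is a minimal user-space sketch of the same range arithmetic. The names i_size and remote_size are stand-ins for i_size_read() and the locally cached server EOF; this is illustrative only and not part of the patch:

    #include <stdio.h>

    typedef unsigned long long u64;

    static u64 umin_u64(u64 a, u64 b)
    {
        return a < b ? a : b;
    }

    /* Return 1 and fill [*start, *end] if a preflush is required. */
    static int zero_range_needs_preflush(u64 offset, u64 len, u64 i_size,
                                         u64 remote_size, u64 *start, u64 *end)
    {
        if (offset + len >= remote_size && offset < i_size) {
            u64 top = umin_u64(offset + len, i_size);

            *start = offset;
            *end = top - 1;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        u64 start, end;

        /* 8 KiB dirty in the pagecache, nothing flushed yet (remote EOF 0),
         * caller zeroes the first 4 KiB */
        if (zero_range_needs_preflush(0, 4096, 8192, 0, &start, &end))
            printf("flush bytes %llu-%llu before FSCTL_SET_ZERO_DATA\n",
                   start, end);
        return 0;
    }

Running it prints "flush bytes 0-4095 ...": only the buffered part of the target region that lies at or beyond the server's EOF gets written back before the zeroing RPC is issued.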
Fixes: 6b69040247e1 ("cifs/smb3: Fix data inconsistent when zero file range") Signed-off-by: David Howells cc: Steve French cc: Zhang Xiaoxu cc: Pavel Shilovsky cc: Paulo Alcantara cc: Shyam Prasad N cc: Rohith Surabattula cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: linux-mm@kvack.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/smb2ops.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 42352f70b01c..1d6e8eacdd74 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -3219,13 +3219,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon, } static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, - loff_t offset, loff_t len, bool keep_size) + unsigned long long offset, unsigned long long len, + bool keep_size) { struct cifs_ses *ses = tcon->ses; struct inode *inode = file_inode(file); struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifsFileInfo *cfile = file->private_data; - unsigned long long new_size; + struct netfs_inode *ictx = netfs_inode(inode); + unsigned long long i_size, new_size, remote_size; long rc; unsigned int xid; @@ -3237,6 +3239,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, inode_lock(inode); filemap_invalidate_lock(inode->i_mapping); + i_size = i_size_read(inode); + remote_size = ictx->remote_i_size; + if (offset + len >= remote_size && offset < i_size) { + unsigned long long top = umin(offset + len, i_size); + + rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1); + if (rc < 0) + goto zero_range_exit; + } + /* * We zero the range through ioctl, so we need remove the page caches * first, otherwise the data may be inconsistent with the server. From 50161385e9d655bc6ebbee5e42fea60b8842129f Mon Sep 17 00:00:00 2001 From: Daiwei Li Date: Tue, 13 Aug 2024 21:55:53 -0700 Subject: [PATCH 158/374] igb: Fix not clearing TimeSync interrupts for 82580 [ Upstream commit ba8cf80724dbc09825b52498e4efacb563935408 ] 82580 NICs have a hardware bug that makes it necessary to write into the TSICR (TimeSync Interrupt Cause) register to clear it: https://lore.kernel.org/all/CDCB8BE0.1EC2C%25matthew.vick@intel.com/ Add a conditional so only for 82580 we write into the TSICR register, so we don't risk losing events for other models. Without this change, when running ptp4l with an Intel 82580 card, I get the following output: > timed out while polling for tx timestamp increasing tx_timestamp_timeout or > increasing kworker priority may correct this issue, but a driver bug likely > causes it This goes away with this change. This (partially) reverts commit ee14cc9ea19b ("igb: Fix missing time sync events"). 
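As an illustration of the pattern the fix restores, below is a toy model of an interrupt-cause register that is cleared by explicitly writing the handled bits back. The register and bit names are invented, and the wr32() helper merely simulates the write-to-clear behaviour the 82580 erratum requires; it is not driver code:

    #include <stdio.h>
    #include <stdint.h>

    #define TS_SYS_WRAP (1u << 0)
    #define TS_TXTS     (1u << 1)
    #define TS_MASK     (TS_SYS_WRAP | TS_TXTS)

    static uint32_t tsicr_reg = TS_SYS_WRAP | TS_TXTS;  /* pending causes */

    static uint32_t rd32(void)       { return tsicr_reg; }
    static void     wr32(uint32_t v) { tsicr_reg &= ~v; } /* write-to-clear */

    int main(void)
    {
        int is_82580 = 1;
        uint32_t tsicr = rd32();

        if (is_82580)
            wr32(tsicr & TS_MASK);  /* explicit clear, 82580 only */

        /* handle the events recorded in 'tsicr' here ... */
        printf("still pending: 0x%x\n", rd32());
        return 0;
    }

The conditional keeps other MACs on the read-to-clear behaviour they rely on, so no events are lost there.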
Fixes: ee14cc9ea19b ("igb: Fix missing time sync events") Closes: https://lore.kernel.org/intel-wired-lan/CAN0jFd1kO0MMtOh8N2Ztxn6f7vvDKp2h507sMryobkBKe=xk=w@mail.gmail.com/ Tested-by: Daiwei Li Suggested-by: Vinicius Costa Gomes Signed-off-by: Daiwei Li Acked-by: Vinicius Costa Gomes Reviewed-by: Kurt Kanzenbach Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igb/igb_main.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b6aa449aa56a..a27d0a4d3d9c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6961,10 +6961,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) static void igb_tsync_interrupt(struct igb_adapter *adapter) { + const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS | + TSINTR_TT0 | TSINTR_TT1 | + TSINTR_AUTT0 | TSINTR_AUTT1); struct e1000_hw *hw = &adapter->hw; u32 tsicr = rd32(E1000_TSICR); struct ptp_clock_event event; + if (hw->mac.type == e1000_82580) { + /* 82580 has a hardware bug that requires an explicit + * write to clear the TimeSync interrupt cause. + */ + wr32(E1000_TSICR, tsicr & mask); + } + if (tsicr & TSINTR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) From 36486c9e8e01b84faaee47203eac0b7e9cc7fa4a Mon Sep 17 00:00:00 2001 From: Dawid Osuchowski Date: Wed, 21 Aug 2024 18:06:40 +0200 Subject: [PATCH 159/374] ice: Add netif_device_attach/detach into PF reset flow [ Upstream commit d11a67634227f9f9da51938af085fb41a733848f ] Ethtool callbacks can be executed while reset is in progress and try to access deleted resources, e.g. getting coalesce settings can result in a NULL pointer dereference seen below. Reproduction steps: Once the driver is fully initialized, trigger reset: # echo 1 > /sys/class/net//device/reset when reset is in progress try to get coalesce settings using ethtool: # ethtool -c BUG: kernel NULL pointer dereference, address: 0000000000000020 PGD 0 P4D 0 Oops: Oops: 0000 [#1] PREEMPT SMP PTI CPU: 11 PID: 19713 Comm: ethtool Tainted: G S 6.10.0-rc7+ #7 RIP: 0010:ice_get_q_coalesce+0x2e/0xa0 [ice] RSP: 0018:ffffbab1e9bcf6a8 EFLAGS: 00010206 RAX: 000000000000000c RBX: ffff94512305b028 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff9451c3f2e588 RDI: ffff9451c3f2e588 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: ffff9451c3f2e580 R11: 000000000000001f R12: ffff945121fa9000 R13: ffffbab1e9bcf760 R14: 0000000000000013 R15: ffffffff9e65dd40 FS: 00007faee5fbe740(0000) GS:ffff94546fd80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000020 CR3: 0000000106c2e005 CR4: 00000000001706f0 Call Trace: ice_get_coalesce+0x17/0x30 [ice] coalesce_prepare_data+0x61/0x80 ethnl_default_doit+0xde/0x340 genl_family_rcv_msg_doit+0xf2/0x150 genl_rcv_msg+0x1b3/0x2c0 netlink_rcv_skb+0x5b/0x110 genl_rcv+0x28/0x40 netlink_unicast+0x19c/0x290 netlink_sendmsg+0x222/0x490 __sys_sendto+0x1df/0x1f0 __x64_sys_sendto+0x24/0x30 do_syscall_64+0x82/0x160 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7faee60d8e27 Calling netif_device_detach() before reset makes the net core not call the driver when ethtool command is issued, the attempt to execute an ethtool command during reset will result in the following message: netlink error: No such device instead of NULL pointer dereference. 
Once reset is done and ice_rebuild() is executing, the netif_device_attach() is called to allow for ethtool operations to occur again in a safe manner. Fixes: fcea6f3da546 ("ice: Add stats and ethtool support") Suggested-by: Jakub Kicinski Reviewed-by: Igor Bagnucki Signed-off-by: Dawid Osuchowski Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Reviewed-by: Michal Schmidt Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 253689dbf6c3..209bfd70c430 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -609,6 +609,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); } } + + if (vsi->netdev) + netif_device_detach(vsi->netdev); skip: /* clear SW filtering DB */ @@ -7590,6 +7593,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf) */ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { + struct ice_vsi *vsi = ice_get_main_vsi(pf); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; bool dvm; @@ -7734,6 +7738,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_rebuild_arfs(pf); } + if (vsi && vsi->netdev) + netif_device_attach(vsi->netdev); + ice_update_pf_netdev_link(pf); /* tell the firmware we are up */ From 7a9203f945fee46a7caefc27a4212399bb7ebdca Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Fri, 30 Aug 2024 09:54:28 +0300 Subject: [PATCH 160/374] platform/x86: dell-smbios: Fix error path in dell_smbios_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ffc17e1479e8e9459b7afa80e5d9d40d0dd78abb ] In case of error in build_tokens_sysfs(), all the memory that has been allocated is freed at end of this function. But then free_group() is called which performs memory deallocation again. Also, instead of free_group() call, there should be exit_dell_smbios_smm() and exit_dell_smbios_wmi() calls, since there is initialization, but there is no release of resources in case of an error. Fix these issues by replacing free_group() call with exit_dell_smbios_wmi() and exit_dell_smbios_smm(). Found by Linux Verification Center (linuxtesting.org) with SVACE. 
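The general shape of the fix is the usual init/teardown pairing, sketched below with hypothetical function names (the real driver additionally skips each exit call if that backend never registered). Each step that succeeded before the failure gets exactly one matching teardown, in reverse order, so nothing is freed twice:

    #include <stdio.h>

    static int  init_backend_a(void) { puts("init a"); return 0; }
    static void exit_backend_a(void) { puts("exit a"); }
    static int  init_backend_b(void) { puts("init b"); return 0; }
    static void exit_backend_b(void) { puts("exit b"); }
    static int  build_sysfs(void)    { puts("sysfs");  return -1; /* fails */ }

    static int demo_init(void)
    {
        int ret;

        ret = init_backend_a();
        if (ret)
            return ret;
        ret = init_backend_b();
        if (ret)
            goto fail_b;
        ret = build_sysfs();
        if (ret)
            goto fail_sysfs;
        return 0;

    fail_sysfs:
        exit_backend_b();   /* undo only what was set up ... */
    fail_b:
        exit_backend_a();   /* ... and in reverse order */
        return ret;
    }

    int main(void)
    {
        return demo_init() ? 1 : 0;
    }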
Fixes: 33b9ca1e53b4 ("platform/x86: dell-smbios: Add a sysfs interface for SMBIOS tokens") Signed-off-by: Aleksandr Mishin Link: https://lore.kernel.org/r/20240830065428.9544-1-amishin@t-argos.ru Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen Signed-off-by: Sasha Levin --- drivers/platform/x86/dell/dell-smbios-base.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index b562ed99ec4e..4702669dbb60 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -587,7 +587,10 @@ static int __init dell_smbios_init(void) return 0; fail_sysfs: - free_group(platform_device); + if (!wmi) + exit_dell_smbios_wmi(); + if (!smm) + exit_dell_smbios_smm(); fail_create_group: platform_device_del(platform_device); From 6e68abdc5d674f9f4185bf1e1956368d05df4838 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Fri, 30 Aug 2024 15:41:06 +0800 Subject: [PATCH 161/374] spi: intel: Add check devm_kasprintf() returned value [ Upstream commit 2920294686ec23211637998f3ec386dfd3d784a6 ] intel_spi_populate_chip() use devm_kasprintf() to set pdata->name. This can return a NULL pointer on failure but this returned value is not checked. Fixes: e58db3bcd93b ("spi: intel: Add default partition and name to the second chip") Signed-off-by: Charles Han Reviewed-by: Mika Westerberg Link: https://patch.msgid.link/20240830074106.8744-1-hanchunchao@inspur.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 3e5dcf2b3c8a..795b7e72baea 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1390,6 +1390,9 @@ static int intel_spi_populate_chip(struct intel_spi *ispi) pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1", dev_name(ispi->dev)); + if (!pdata->name) + return -ENOMEM; + pdata->nr_parts = 1; parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts), GFP_KERNEL); From b15bfd9701c61c494c25dec0185dd63c6f452ce9 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 30 Aug 2024 07:35:12 -0700 Subject: [PATCH 162/374] regulator: core: Stub devm_regulator_bulk_get_const() if !CONFIG_REGULATOR [ Upstream commit 1a5caec7f80ca2e659c03f45378ee26915f4eda2 ] When adding devm_regulator_bulk_get_const() I missed adding a stub for when CONFIG_REGULATOR is not enabled. Under certain conditions (like randconfig testing) this can cause the compiler to reports errors like: error: implicit declaration of function 'devm_regulator_bulk_get_const'; did you mean 'devm_regulator_bulk_get_enable'? Add the stub. 
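For context, this is the standard convention of pairing a real implementation with a compiled-out no-op stub so that callers build and link either way. The config symbol and function below are invented purely to illustrate the pattern; build with -DCONFIG_FEATURE to select the real body:

    #include <stdio.h>

    #ifdef CONFIG_FEATURE
    int feature_bulk_get(int num)
    {
        printf("really acquiring %d resources\n", num);
        return 0;
    }
    #else
    static inline int feature_bulk_get(int num)
    {
        (void)num;          /* subsystem disabled: succeed as a no-op */
        return 0;
    }
    #endif

    int main(void)
    {
        /* Callers compile either way, with no #ifdef at the call site. */
        return feature_bulk_get(3);
    }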
Fixes: 1de452a0edda ("regulator: core: Allow drivers to define their init data as const") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202408301813.TesFuSbh-lkp@intel.com/ Cc: Neil Armstrong Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/20240830073511.1.Ib733229a8a19fad8179213c05e1af01b51e42328@changeid Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- include/linux/regulator/consumer.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 59d0b9a79e6e..e6ad927bb4a8 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -451,6 +451,14 @@ static inline int of_regulator_bulk_get_all(struct device *dev, struct device_no return 0; } +static inline int devm_regulator_bulk_get_const( + struct device *dev, int num_consumers, + const struct regulator_bulk_data *in_consumers, + struct regulator_bulk_data **out_consumers) +{ + return 0; +} + static inline int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { From fa2951deaea737763f35a486146750bfcb608bdd Mon Sep 17 00:00:00 2001 From: Martin Jocic Date: Fri, 14 Jun 2024 17:15:19 +0200 Subject: [PATCH 163/374] can: kvaser_pciefd: Skip redundant NULL pointer check in ISR [ Upstream commit ac765219c2c4e44f29063724c8d36435a3e61985 ] This check is already done at the creation of the net devices in kvaser_pciefd_setup_can_ctrls called from kvaser_pciefd_probe. If it fails, the driver won't load, so there should be no need to repeat the check inside the ISR. The number of channels is read from the FPGA and should be trusted. Signed-off-by: Martin Jocic Link: https://lore.kernel.org/all/20240614151524.2718287-3-martin.jocic@kvaser.com Signed-off-by: Marc Kleine-Budde Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers") Signed-off-by: Sasha Levin --- drivers/net/can/kvaser_pciefd.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 7b5028b67cd5..aebc221b82c2 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1701,12 +1701,6 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { - if (!pcie->can[i]) { - dev_err(&pcie->pci->dev, - "IRQ mask points to unallocated controller\n"); - break; - } - /* Check that mask matches channel (i) IRQ mask */ if (board_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); From 662f25ae3401247e3c439d68570f70ae5bb7ea08 Mon Sep 17 00:00:00 2001 From: Martin Jocic Date: Fri, 14 Jun 2024 17:15:20 +0200 Subject: [PATCH 164/374] can: kvaser_pciefd: Remove unnecessary comment [ Upstream commit 11d186697ceb10b68c6a1fd505635346b1ccd055 ] The code speaks for itself. 
Signed-off-by: Martin Jocic Link: https://lore.kernel.org/all/20240614151524.2718287-4-martin.jocic@kvaser.com Signed-off-by: Marc Kleine-Budde Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers") Signed-off-by: Sasha Levin --- drivers/net/can/kvaser_pciefd.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index aebc221b82c2..3ac18dd0a022 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1701,7 +1701,6 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { - /* Check that mask matches channel (i) IRQ mask */ if (board_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } From 330912c6f39d34c5977bd734b9e6d976c145347f Mon Sep 17 00:00:00 2001 From: Martin Jocic Date: Fri, 14 Jun 2024 17:15:23 +0200 Subject: [PATCH 165/374] can: kvaser_pciefd: Rename board_irq to pci_irq [ Upstream commit cbf88a6ba7bb6ce0d3131b119298f73bd7b18459 ] Rename the variable name board_irq in the ISR to pci_irq to be more specific and to match the macro by which it is read. Signed-off-by: Martin Jocic Link: https://lore.kernel.org/all/20240614151524.2718287-7-martin.jocic@kvaser.com Signed-off-by: Marc Kleine-Budde Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers") Signed-off-by: Sasha Levin --- drivers/net/can/kvaser_pciefd.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 3ac18dd0a022..a026ea2f5b35 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1691,17 +1691,17 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) { struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; - u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); + u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); int i; - if (!(board_irq & irq_mask->all)) + if (!(pci_irq & irq_mask->all)) return IRQ_NONE; - if (board_irq & irq_mask->kcan_rx0) + if (pci_irq & irq_mask->kcan_rx0) kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { - if (board_irq & irq_mask->kcan_tx[i]) + if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } From d1a40a5c2a4cfdd93aea75bebd33ef2fcd638f40 Mon Sep 17 00:00:00 2001 From: Martin Jocic Date: Thu, 20 Jun 2024 20:13:19 +0200 Subject: [PATCH 166/374] can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR [ Upstream commit 48f827d4f48f5243e37b9240029ce3f456d1f490 ] A new interrupt is triggered by resetting the DMA RX buffers. Since MSI interrupts are faster than legacy interrupts, the reset of the DMA buffers must be moved to the very end of the ISR, otherwise a new MSI interrupt will be masked by the current one. 
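A reduced sketch of the resulting interrupt-handler shape follows. The names and bit values are illustrative and the stubbed iowrite32() only prints, but the ordering is the point: the buffer-release writes come last because each can immediately raise the next edge-triggered MSI:

    #include <stdio.h>
    #include <stdint.h>

    #define IRQ_DPD0         (1u << 0)  /* DMA packet done, buffer 0 */
    #define IRQ_DPD1         (1u << 1)  /* DMA packet done, buffer 1 */
    #define CMD_RELEASE_BUF0 (1u << 0)
    #define CMD_RELEASE_BUF1 (1u << 1)

    static void iowrite32(uint32_t val, const char *reg)
    {
        printf("write 0x%x -> %s\n", val, reg);
    }

    static int irq_handler(uint32_t srb_irq)
    {
        /* 1. acknowledge the cause, drain both RX buffers, run TX
         *    completion, ... (omitted) */

        /* 2. only now, as the very last action, release the consumed
         *    buffers: this may raise a new interrupt at once, so nothing
         *    must run after it in the handler */
        if (srb_irq & IRQ_DPD0)
            iowrite32(CMD_RELEASE_BUF0, "SRB_CMD");
        if (srb_irq & IRQ_DPD1)
            iowrite32(CMD_RELEASE_BUF1, "SRB_CMD");

        return 1;   /* IRQ_HANDLED */
    }

    int main(void)
    {
        return irq_handler(IRQ_DPD0 | IRQ_DPD1) == 1 ? 0 : 1;
    }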
Signed-off-by: Martin Jocic Link: https://lore.kernel.org/all/20240620181320.235465-2-martin.jocic@kvaser.com Signed-off-by: Marc Kleine-Budde Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers") Signed-off-by: Sasha Levin --- drivers/net/can/kvaser_pciefd.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index a026ea2f5b35..cc39befc9290 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1640,23 +1640,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) return res; } -static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) kvaser_pciefd_read_buffer(pcie, 0); - /* Reset DMA buffer 0 */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) kvaser_pciefd_read_buffer(pcie, 1); - /* Reset DMA buffer 1 */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || @@ -1665,6 +1657,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1692,19 +1685,32 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); + u32 srb_irq = 0; int i; if (!(pci_irq & irq_mask->all)) return IRQ_NONE; if (pci_irq & irq_mask->kcan_rx0) - kvaser_pciefd_receive_irq(pcie); + srb_irq = kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + /* Reset DMA buffer 0, may trigger new interrupt */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + /* Reset DMA buffer 1, may trigger new interrupt */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + } + return IRQ_HANDLED; } From c4adc08b85f9c82a356aebd84a2f55ab146acffe Mon Sep 17 00:00:00 2001 From: Martin Jocic Date: Fri, 30 Aug 2024 17:31:13 +0200 Subject: [PATCH 167/374] can: kvaser_pciefd: Use a single write when releasing RX buffers [ Upstream commit dd885d90c047dbdd2773c1d33954cbd8747d81e2 ] Kvaser's PCIe cards uses the KCAN FPGA IP block which has dual 4K buffers for incoming messages shared by all (currently up to eight) channels. While the driver processes messages in one buffer, new incoming messages are stored in the other and so on. The design of KCAN is such that a buffer must be fully read and then released. Releasing a buffer will make the FPGA switch buffers. 
If the other buffer contains at least one incoming message the FPGA will also instantly issue a new interrupt, if not the interrupt will be issued after receiving the first new message. With IRQx interrupts, it takes a little time for the interrupt to happen, enough for any previous ISR call to do it's business and return, but MSI interrupts are way faster so this time is reduced to almost nothing. So with MSI, releasing the buffer HAS to be the very last action of the ISR before returning, otherwise the new interrupt might be "masked" by the kernel because the previous ISR call hasn't returned. And the interrupts are edge-triggered so we cannot loose one, or the ping-pong reading process will stop. This is why this patch modifies the driver to use a single write to the SRB_CMD register before returning. Signed-off-by: Martin Jocic Reviewed-by: Vincent Mailhol Link: https://patch.msgid.link/20240830153113.2081440-1-martin.jocic@kvaser.com Fixes: 26ad340e582d ("can: kvaser_pciefd: Add driver for Kvaser PCIEcan devices") Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- drivers/net/can/kvaser_pciefd.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index cc39befc9290..ab15a2ae8a20 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1686,6 +1686,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); u32 srb_irq = 0; + u32 srb_release = 0; int i; if (!(pci_irq & irq_mask->all)) @@ -1699,17 +1700,14 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) kvaser_pciefd_transmit_irq(pcie->can[i]); } - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { - /* Reset DMA buffer 0, may trigger new interrupt */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { - /* Reset DMA buffer 1, may trigger new interrupt */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; + + if (srb_release) + iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); return IRQ_HANDLED; } From 5d8a15c1138e3d92bd45314deb133a4c1eac3747 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 21 Aug 2024 15:43:40 -0700 Subject: [PATCH 168/374] Bluetooth: qca: If memdump doesn't work, re-enable IBS [ Upstream commit 8ae22de9d2eae3c432de64bf2b3a5a69cf1d1124 ] On systems in the field, we are seeing this sometimes in the kernel logs: Bluetooth: qca_controller_memdump() hci0: hci_devcd_init Return:-95 This means that _something_ decided that it wanted to get a memdump but then hci_devcd_init() returned -EOPNOTSUPP (AKA -95). The cleanup code in qca_controller_memdump() when we get back an error from hci_devcd_init() undoes most things but forgets to clear QCA_IBS_DISABLED. One side effect of this is that, during the next suspend, qca_suspend() will always get a timeout. Let's fix it so that we clear the bit. 
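Sketch of the flag lifecycle being repaired, with invented flag names: a mode bit set while the dump runs must also be cleared on the failure path, otherwise later code that keys on it (suspend, in this driver) stalls:

    #include <stdio.h>

    #define F_IBS_DISABLED  (1u << 0)
    #define F_COLLECTING    (1u << 1)

    static unsigned int flags;

    static int start_memdump(int devcd_ok)
    {
        flags |= F_IBS_DISABLED | F_COLLECTING;

        if (!devcd_ok) {
            /* error path: undo *all* of the state, not just part of it */
            flags &= ~F_COLLECTING;
            flags &= ~F_IBS_DISABLED;   /* the bit the fix clears */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        start_memdump(0);
        printf("suspend %s\n",
               (flags & F_IBS_DISABLED) ? "will time out" : "is safe");
        return 0;
    }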
Fixes: 06d3fdfcdf5c ("Bluetooth: hci_qca: Add qcom devcoredump support") Reviewed-by: Guenter Roeck Reviewed-by: Stephen Boyd Signed-off-by: Douglas Anderson Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Sasha Levin --- drivers/bluetooth/hci_qca.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 34c36f0f781e..c5606a62f230 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1090,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work) qca->memdump_state = QCA_MEMDUMP_COLLECTED; cancel_delayed_work(&qca->ctrl_memdump_timeout); clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + clear_bit(QCA_IBS_DISABLED, &qca->flags); mutex_unlock(&qca->hci_memdump_lock); return; } From cae19117ac360f3e30ac337d06e39409a0b59a92 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Mon, 26 Aug 2024 15:47:30 -0400 Subject: [PATCH 169/374] Bluetooth: hci_sync: Introduce hci_cmd_sync_run/hci_cmd_sync_run_once [ Upstream commit c898f6d7b093bd71e66569cd6797c87d4056f44b ] This introduces hci_cmd_sync_run/hci_cmd_sync_run_once which acts like hci_cmd_sync_queue/hci_cmd_sync_queue_once but runs immediately when already on hdev->cmd_sync_work context. Signed-off-by: Luiz Augusto von Dentz Stable-dep-of: 227a0cdf4a02 ("Bluetooth: MGMT: Fix not generating command complete for MGMT_OP_DISCONNECT") Signed-off-by: Sasha Levin --- include/net/bluetooth/hci_sync.h | 4 +++ net/bluetooth/hci_sync.c | 42 ++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 534c3386e714..3cb2d10cac93 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -52,6 +52,10 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); +int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); +int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); struct hci_cmd_sync_work_entry * hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 4e90bd722e7b..f4a54dbc07f1 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -114,7 +114,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, skb_queue_tail(&req->cmd_q, skb); } -static int hci_cmd_sync_run(struct hci_request *req) +static int hci_req_sync_run(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; @@ -164,7 +164,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, hdev->req_status = HCI_REQ_PEND; - err = hci_cmd_sync_run(&req); + err = hci_req_sync_run(&req); if (err < 0) return ERR_PTR(err); @@ -730,6 +730,44 @@ int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, } EXPORT_SYMBOL(hci_cmd_sync_queue_once); +/* Run HCI command: + * + * - hdev must be running + * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, 
hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue command if hdev is running which means it had been opened + * and is either on init phase or is already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + /* If on cmd_sync_work then run immediately otherwise queue */ + if (current_work() == &hdev->cmd_sync_work) + return func(hdev, data); + + return hci_cmd_sync_submit(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run); + +/* Run HCI command entry once: + * + * - Lookup if an entry already exist and only if it doesn't creates a new entry + * and run it. + * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_run(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run_once); + /* Lookup HCI command entry: * * - Return first entry that matches by function callback or data or From 58afdc9b18871eb1d461c725be9e9f3f44a39aeb Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Mon, 26 Aug 2024 16:14:04 -0400 Subject: [PATCH 170/374] Bluetooth: MGMT: Fix not generating command complete for MGMT_OP_DISCONNECT [ Upstream commit 227a0cdf4a028a73dc256d0f5144b4808d718893 ] MGMT_OP_DISCONNECT can be called while mgmt_device_connected has not been called yet, which will cause the connection procedure to be aborted, so mgmt_device_disconnected shall still respond with command complete to MGMT_OP_DISCONNECT and just not emit MGMT_EV_DEVICE_DISCONNECTED since MGMT_EV_DEVICE_CONNECTED was never sent. To fix this MGMT_OP_DISCONNECT is changed to work similarly to other command which do use hci_cmd_sync_queue and then use hci_conn_abort to disconnect and returns the result, in order for hci_conn_abort to be used from hci_cmd_sync context it now uses hci_cmd_sync_run_once. Link: https://github.com/bluez/bluez/issues/932 Fixes: 12d4a3b2ccb3 ("Bluetooth: Move check for MGMT_CONNECTED flag into mgmt.c") Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Sasha Levin --- net/bluetooth/hci_conn.c | 6 ++- net/bluetooth/mgmt.c | 84 ++++++++++++++++++++-------------------- 2 files changed, 47 insertions(+), 43 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 080053a85b4d..3c74d171085d 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -2953,5 +2953,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason) return 0; } - return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL); + /* Run immediately if on cmd_sync_work since this may be called + * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does + * already queue its callback on cmd_sync_work. + */ + return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index fa3fa1fde5df..ba28907afb3f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2925,7 +2925,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data) if (!conn) return 0; - return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); + /* Disregard any possible error since the likes of hci_abort_conn_sync + * will clean up the connection no matter the error. 
+ */ + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + + return 0; } static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, @@ -3057,13 +3062,44 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, return err; } +static void disconnect_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + cmd->cmd_complete(cmd, mgmt_status(err)); + mgmt_pending_free(cmd); +} + +static int disconnect_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_disconnect *cp = cmd->param; + struct hci_conn *conn; + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn) + return -ENOTCONN; + + /* Disregard any possible error since the likes of hci_abort_conn_sync + * will clean up the connection no matter the error. + */ + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + + return 0; +} + static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; struct mgmt_pending_cmd *cmd; - struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -3086,27 +3122,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (pending_find(MGMT_OP_DISCONNECT, hdev)) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); - goto failed; - } - - if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); - else - conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); - - if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_CONNECTED, &rp, - sizeof(rp)); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -3114,9 +3130,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, cmd->cmd_complete = generic_cmd_complete; - err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); + err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd, + disconnect_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); failed: hci_dev_unlock(hdev); @@ -9634,18 +9651,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, mgmt_event_skb(skb, NULL); } -static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) -{ - struct sock **sk = data; - - cmd->cmd_complete(cmd, 0); - - *sk = cmd->sk; - sock_hold(*sk); - - mgmt_pending_remove(cmd); -} - static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; @@ -9689,8 +9694,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, if (link_type != ACL_LINK && link_type != LE_LINK) return; - mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); - bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.reason = reason; @@ -9703,9 +9706,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, if (sk) sock_put(sk); - - mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); } void mgmt_disconnect_failed(struct hci_dev 
*hdev, bdaddr_t *bdaddr, From 6710130013658afb56d45f8be83e15d9513a6dd5 Mon Sep 17 00:00:00 2001 From: Pawel Dembicki Date: Fri, 30 Aug 2024 13:13:50 +0200 Subject: [PATCH 171/374] hwmon: ltc2991: fix register bits defines [ Upstream commit 6a422a96bc84cf9b9f0ff741f293a1f9059e0883 ] In the LTC2991, V5 and V6 channels use the low nibble of the "V5, V6, V7, and V8 Control Register" for configuration, but currently, the high nibble is defined. This patch changes the defines to use the low nibble. Fixes: 2b9ea4262ae9 ("hwmon: Add driver for ltc2991") Signed-off-by: Pawel Dembicki Message-ID: <20240830111349.30531-1-paweldembicki@gmail.com> Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/ltc2991.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c index f74ce9c25bf7..d5e120dfd592 100644 --- a/drivers/hwmon/ltc2991.c +++ b/drivers/hwmon/ltc2991.c @@ -42,9 +42,9 @@ #define LTC2991_V7_V8_FILT_EN BIT(7) #define LTC2991_V7_V8_TEMP_EN BIT(5) #define LTC2991_V7_V8_DIFF_EN BIT(4) -#define LTC2991_V5_V6_FILT_EN BIT(7) -#define LTC2991_V5_V6_TEMP_EN BIT(5) -#define LTC2991_V5_V6_DIFF_EN BIT(4) +#define LTC2991_V5_V6_FILT_EN BIT(3) +#define LTC2991_V5_V6_TEMP_EN BIT(1) +#define LTC2991_V5_V6_DIFF_EN BIT(0) #define LTC2991_REPEAT_ACQ_EN BIT(4) #define LTC2991_T_INT_FILT_EN BIT(3) From 70e6473995a4ecbc3919d4ba1bf6fa2db4933416 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 23 Aug 2024 17:38:50 +0100 Subject: [PATCH 172/374] scripts: fix gfp-translate after ___GFP_*_BITS conversion to an enum MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a3f6a89c834a4cba0f881da21307b26de3796133 ] Richard reports that since 772dd0342727c ("mm: enumerate all gfp flags"), gfp-translate is broken, as the bit numbers are implicit, leaving the shell script unable to extract them. Even more, some bits are now at a variable location, making it double extra hard to parse using a simple shell script. Use a brute-force approach to the problem by generating a small C stub that will use the enum to dump the interesting bits. As an added bonus, we are now able to identify invalid bits for a given configuration. As an added drawback, we cannot parse include files that predate this change anymore. Tough luck. Link: https://lkml.kernel.org/r/20240823163850.3791201-1-maz@kernel.org Fixes: 772dd0342727 ("mm: enumerate all gfp flags") Signed-off-by: Marc Zyngier Reported-by: Richard Weinberger Cc: Petr Tesařík Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton Signed-off-by: Sasha Levin --- scripts/gfp-translate | 66 ++++++++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 17 deletions(-) diff --git a/scripts/gfp-translate b/scripts/gfp-translate index 6c9aed17cf56..8385ae0d5af9 100755 --- a/scripts/gfp-translate +++ b/scripts/gfp-translate @@ -62,25 +62,57 @@ if [ "$GFPMASK" = "none" ]; then fi # Extract GFP flags from the kernel source -TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1 -grep -q ___GFP $SOURCE/include/linux/gfp_types.h -if [ $? 
-eq 0 ]; then - grep "^#define ___GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE -else - grep "^#define __GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE -fi +TMPFILE=`mktemp -t gfptranslate-XXXXXX.c` || exit 1 -# Parse the flags -IFS=" -" echo Source: $SOURCE echo Parsing: $GFPMASK -for LINE in `cat $TMPFILE`; do - MASK=`echo $LINE | awk '{print $3}'` - if [ $(($GFPMASK&$MASK)) -ne 0 ]; then - echo $LINE - fi -done -rm -f $TMPFILE +( + cat < +#include + +// Try to fool compiler.h into not including extra stuff +#define __ASSEMBLY__ 1 + +#include +#include + +static const char *masks[] = { +EOF + + sed -nEe 's/^[[:space:]]+(___GFP_.*)_BIT,.*$/\1/p' $SOURCE/include/linux/gfp_types.h | + while read b; do + cat < 0) + [${b}_BIT] = "$b", +#endif +EOF + done + + cat < $TMPFILE + +${CC:-gcc} -Wall -o ${TMPFILE}.bin -I $SOURCE/include $TMPFILE && ${TMPFILE}.bin + +rm -f $TMPFILE ${TMPFILE}.bin + exit 0 From 0b7d7bca20c52a0a328f7a326ac638a156e70513 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 29 Aug 2024 22:22:45 +0300 Subject: [PATCH 173/374] igc: Unlock on error in igc_io_resume() [ Upstream commit ef4a99a0164e3972abb421cbb1b09ea6c61414df ] Call rtnl_unlock() on this error path, before returning. Fixes: bc23aa949aeb ("igc: Add pcie error handler support") Signed-off-by: Dan Carpenter Reviewed-by: Gerhard Engleder Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igc/igc_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 3041f8142324..773136925fd0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7417,6 +7417,7 @@ static void igc_io_resume(struct pci_dev *pdev) rtnl_lock(); if (netif_running(netdev)) { if (igc_open(netdev)) { + rtnl_unlock(); netdev_err(netdev, "igc_open failed after reset\n"); return; } From 4b19c83ba108aa66226da5b79810e4d19e005f12 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sun, 1 Sep 2024 05:10:51 +0200 Subject: [PATCH 174/374] hwmon: (hp-wmi-sensors) Check if WMI event data exists MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a54da9df75cd1b4b5028f6c60f9a211532680585 ] The BIOS can choose to return no event data in response to a WMI event, so the ACPI object passed to the WMI notify handler can be NULL. Check for such a situation and ignore the event in such a case. 
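Reduced to plain C, the fix is simply an early return when the optional payload is absent; the types and names below are stand-ins rather than the driver's:

    #include <stdio.h>
    #include <stddef.h>

    struct event_obj { int type; };

    static void handle_event(const struct event_obj *wobj)
    {
        if (!wobj) {
            /* firmware sent the event without any data: ignore it */
            return;
        }
        printf("event type %d\n", wobj->type);
    }

    int main(void)
    {
        handle_event(NULL);                              /* no payload */
        handle_event(&(struct event_obj){ .type = 5 });  /* with payload */
        return 0;
    }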
Fixes: 23902f98f8d4 ("hwmon: add HP WMI Sensors driver") Signed-off-by: Armin Wolf Reviewed-by: Ilpo Järvinen Message-ID: <20240901031055.3030-2-W_Armin@gmx.de> Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/hp-wmi-sensors.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c index b5325d0e72b9..dfa1d6926dea 100644 --- a/drivers/hwmon/hp-wmi-sensors.c +++ b/drivers/hwmon/hp-wmi-sensors.c @@ -1637,6 +1637,8 @@ static void hp_wmi_notify(u32 value, void *context) goto out_unlock; wobj = out.pointer; + if (!wobj) + goto out_unlock; err = populate_event_from_wobj(dev, &event, wobj); if (err) { From 1b8719ac77cf93a351ad76c0f55ad5a1c71ee033 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 27 Aug 2024 22:29:53 -0700 Subject: [PATCH 175/374] perf lock contention: Fix spinlock and rwlock accounting [ Upstream commit 287bd5cf06e0f2c02293ce942777ad1f18059ed3 ] The spinlock and rwlock use a single-element per-cpu array to track current locks due to performance reason. But this means the key is always available and it cannot simply account lock stats in the array because some of them are invalid. In fact, the contention_end() program in the BPF invalidates the entry by setting the 'lock' value to 0 instead of deleting the entry for the hashmap. So it should skip entries with the lock value of 0 in the account_end_timestamp(). Otherwise, it'd have spurious high contention on an idle machine: $ sudo perf lock con -ab -Y spinlock sleep 3 contended total wait max wait avg wait type caller 8 4.72 s 1.84 s 590.46 ms spinlock rcu_core+0xc7 8 1.87 s 1.87 s 233.48 ms spinlock process_one_work+0x1b5 2 1.87 s 1.87 s 933.92 ms spinlock worker_thread+0x1a2 3 1.81 s 1.81 s 603.93 ms spinlock tmigr_update_events+0x13c 2 1.72 s 1.72 s 861.98 ms spinlock tick_do_update_jiffies64+0x25 6 42.48 us 13.02 us 7.08 us spinlock futex_q_lock+0x2a 1 13.03 us 13.03 us 13.03 us spinlock futex_wake+0xce 1 11.61 us 11.61 us 11.61 us spinlock rcu_core+0xc7 I don't believe it has contention on a spinlock longer than 1 second. After this change, it only reports some small contentions. 
$ sudo perf lock con -ab -Y spinlock sleep 3 contended total wait max wait avg wait type caller 4 133.51 us 43.29 us 33.38 us spinlock tick_do_update_jiffies64+0x25 4 69.06 us 31.82 us 17.27 us spinlock process_one_work+0x1b5 2 50.66 us 25.77 us 25.33 us spinlock rcu_core+0xc7 1 28.45 us 28.45 us 28.45 us spinlock rcu_core+0xc7 1 24.77 us 24.77 us 24.77 us spinlock tmigr_update_events+0x13c 1 23.34 us 23.34 us 23.34 us spinlock raw_spin_rq_lock_nested+0x15 Fixes: b5711042a1c8 ("perf lock contention: Use per-cpu array map for spinlocks") Reported-by: Xi Wang Cc: Song Liu Cc: bpf@vger.kernel.org Link: https://lore.kernel.org/r/20240828052953.1445862-1-namhyung@kernel.org Signed-off-by: Namhyung Kim Signed-off-by: Sasha Levin --- tools/perf/util/bpf_lock_contention.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c index b4cb3fe5cc25..bc4e92c0c08b 100644 --- a/tools/perf/util/bpf_lock_contention.c +++ b/tools/perf/util/bpf_lock_contention.c @@ -286,6 +286,9 @@ static void account_end_timestamp(struct lock_contention *con) goto next; for (int i = 0; i < total_cpus; i++) { + if (cpu_data[i].lock == 0) + continue; + update_lock_stat(stat_fd, -1, end_ts, aggr_mode, &cpu_data[i]); } From 6a920f041aeb5992b855a6f8f53278f91589f18e Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 29 Aug 2024 15:03:21 +0300 Subject: [PATCH 176/374] net: ethernet: ti: am65-cpsw: Fix RX statistics for XDP_TX and XDP_REDIRECT [ Upstream commit 624d3291484f9cada10660f820db926c0bce7741 ] We are not using ndev->stats for rx_packets and rx_bytes anymore. Instead, we use per CPU stats which are collated in am65_cpsw_nuss_ndo_get_stats(). Fix RX statistics for XDP_TX and XDP_REDIRECT cases. Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support") Signed-off-by: Roger Quadros Reviewed-by: Jacob Keller Acked-by: Julien Panis Reviewed-by: MD Danish Anwar Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 902b22de61d1..330eea349caa 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -998,7 +998,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, int desc_idx, int cpu, int *len) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_ndev_priv *ndev_priv; struct net_device *ndev = port->ndev; + struct am65_cpsw_ndev_stats *stats; int ret = AM65_CPSW_XDP_CONSUMED; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; @@ -1016,6 +1018,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, /* XDP prog might have changed packet data and boundaries */ *len = xdp->data_end - xdp->data; + ndev_priv = netdev_priv(ndev); + stats = this_cpu_ptr(ndev_priv->stats); + switch (act) { case XDP_PASS: ret = AM65_CPSW_XDP_PASS; @@ -1035,16 +1040,20 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, if (err) goto drop; - ndev->stats.rx_bytes += *len; - ndev->stats.rx_packets++; + u64_stats_update_begin(&stats->syncp); + stats->rx_bytes += *len; + stats->rx_packets++; + u64_stats_update_end(&stats->syncp); ret = AM65_CPSW_XDP_CONSUMED; goto out; case XDP_REDIRECT: if (unlikely(xdp_do_redirect(ndev, xdp, prog))) goto drop; - ndev->stats.rx_bytes += *len; - ndev->stats.rx_packets++; + 
u64_stats_update_begin(&stats->syncp); + stats->rx_bytes += *len; + stats->rx_packets++; + u64_stats_update_end(&stats->syncp); ret = AM65_CPSW_XDP_REDIRECT; goto out; default: From d9c8dbbc236cdc6231ee91cdede2fc97b430cfff Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Fri, 30 Aug 2024 10:20:25 +0800 Subject: [PATCH 177/374] net: phy: Fix missing of_node_put() for leds [ Upstream commit 2560db6ede1aaf162a73b2df43e0b6c5ed8819f7 ] The call of of_get_child_by_name() will cause refcount incremented for leds, if it succeeds, it should call of_node_put() to decrease it, fix it. Fixes: 01e5b728e9e4 ("net: phy: Add a binding for PHY LEDs") Reviewed-by: Jonathan Cameron Signed-off-by: Jinjie Ruan Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20240830022025.610844-1-ruanjinjie@huawei.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/phy/phy_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6c6ec9475709..2c0ee5cf8b6e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3346,11 +3346,13 @@ static int of_phy_leds(struct phy_device *phydev) err = of_phy_led(phydev, led); if (err) { of_node_put(led); + of_node_put(leds); phy_leds_unregister(phydev); return err; } } + of_node_put(leds); return 0; } From 45e1755eedd6f562202f997f95c63ea1d6df88fb Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 29 Aug 2024 11:36:01 -0700 Subject: [PATCH 178/374] ptp: ocp: convert serial ports to array [ Upstream commit d7875b4b078f7e2d862e88aed99c3ea0381aa189 ] Simplify serial port management code by using array of ports and helpers to get the name of the port. This change is needed to make the next patch simplier. Signed-off-by: Vadim Fedorenko Reviewed-by: Greg Kroah-Hartman Signed-off-by: Paolo Abeni Stable-dep-of: 82ace0c8fe9b ("ptp: ocp: adjust sysfs entries to expose tty information") Signed-off-by: Sasha Levin --- drivers/ptp/ptp_ocp.c | 120 ++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index ee2ced88ab34..46369de8e30b 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -316,6 +316,15 @@ struct ptp_ocp_serial_port { #define OCP_SERIAL_LEN 6 #define OCP_SMA_NUM 4 +enum { + PORT_GNSS, + PORT_GNSS2, + PORT_MAC, /* miniature atomic clock */ + PORT_NMEA, + + __PORT_COUNT, +}; + struct ptp_ocp { struct pci_dev *pdev; struct device dev; @@ -357,10 +366,7 @@ struct ptp_ocp { struct delayed_work sync_work; int id; int n_irqs; - struct ptp_ocp_serial_port gnss_port; - struct ptp_ocp_serial_port gnss2_port; - struct ptp_ocp_serial_port mac_port; /* miniature atomic clock */ - struct ptp_ocp_serial_port nmea_port; + struct ptp_ocp_serial_port port[__PORT_COUNT]; bool fw_loader; u8 fw_tag; u16 fw_version; @@ -655,28 +661,28 @@ static struct ocp_resource ocp_fb_resource[] = { }, }, { - OCP_SERIAL_RESOURCE(gnss_port), + OCP_SERIAL_RESOURCE(port[PORT_GNSS]), .offset = 0x00160000 + 0x1000, .irq_vec = 3, .extra = &(struct ptp_ocp_serial_port) { .baud = 115200, }, }, { - OCP_SERIAL_RESOURCE(gnss2_port), + OCP_SERIAL_RESOURCE(port[PORT_GNSS2]), .offset = 0x00170000 + 0x1000, .irq_vec = 4, .extra = &(struct ptp_ocp_serial_port) { .baud = 115200, }, }, { - OCP_SERIAL_RESOURCE(mac_port), + OCP_SERIAL_RESOURCE(port[PORT_MAC]), .offset = 0x00180000 + 0x1000, .irq_vec = 5, .extra = &(struct ptp_ocp_serial_port) { .baud = 57600, }, }, { - OCP_SERIAL_RESOURCE(nmea_port), + 
OCP_SERIAL_RESOURCE(port[PORT_NMEA]), .offset = 0x00190000 + 0x1000, .irq_vec = 10, }, { @@ -740,7 +746,7 @@ static struct ocp_resource ocp_art_resource[] = { .offset = 0x01000000, .size = 0x10000, }, { - OCP_SERIAL_RESOURCE(gnss_port), + OCP_SERIAL_RESOURCE(port[PORT_GNSS]), .offset = 0x00160000 + 0x1000, .irq_vec = 3, .extra = &(struct ptp_ocp_serial_port) { .baud = 115200, @@ -839,7 +845,7 @@ static struct ocp_resource ocp_art_resource[] = { }, }, { - OCP_SERIAL_RESOURCE(mac_port), + OCP_SERIAL_RESOURCE(port[PORT_MAC]), .offset = 0x00190000, .irq_vec = 7, .extra = &(struct ptp_ocp_serial_port) { .baud = 9600, @@ -950,14 +956,14 @@ static struct ocp_resource ocp_adva_resource[] = { .offset = 0x00220000, .size = 0x1000, }, { - OCP_SERIAL_RESOURCE(gnss_port), + OCP_SERIAL_RESOURCE(port[PORT_GNSS]), .offset = 0x00160000 + 0x1000, .irq_vec = 3, .extra = &(struct ptp_ocp_serial_port) { .baud = 9600, }, }, { - OCP_SERIAL_RESOURCE(mac_port), + OCP_SERIAL_RESOURCE(port[PORT_MAC]), .offset = 0x00180000 + 0x1000, .irq_vec = 5, .extra = &(struct ptp_ocp_serial_port) { .baud = 115200, @@ -1649,6 +1655,15 @@ ptp_ocp_tod_gnss_name(int idx) return gnss_name[idx]; } +static const char * +ptp_ocp_tty_port_name(int idx) +{ + static const char * const tty_name[] = { + "GNSS", "GNSS2", "MAC", "NMEA" + }; + return tty_name[idx]; +} + struct ptp_ocp_nvmem_match_info { struct ptp_ocp *bp; const void * const tag; @@ -3960,16 +3975,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) bp = dev_get_drvdata(dev); seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp)); - if (bp->gnss_port.line != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", - bp->gnss_port.line); - if (bp->gnss2_port.line != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", - bp->gnss2_port.line); - if (bp->mac_port.line != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port.line); - if (bp->nmea_port.line != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port.line); + for (i = 0; i < __PORT_COUNT; i++) { + if (bp->port[i].line != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", ptp_ocp_tty_port_name(i), + bp->port[i].line); + } memset(sma_val, 0xff, sizeof(sma_val)); if (bp->sma_map1) { @@ -4279,7 +4289,7 @@ ptp_ocp_dev_release(struct device *dev) static int ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) { - int err; + int i, err; mutex_lock(&ptp_ocp_lock); err = idr_alloc(&ptp_ocp_idr, bp, 0, 0, GFP_KERNEL); @@ -4292,10 +4302,10 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) bp->ptp_info = ptp_ocp_clock_info; spin_lock_init(&bp->lock); - bp->gnss_port.line = -1; - bp->gnss2_port.line = -1; - bp->mac_port.line = -1; - bp->nmea_port.line = -1; + + for (i = 0; i < __PORT_COUNT; i++) + bp->port[i].line = -1; + bp->pdev = pdev; device_initialize(&bp->dev); @@ -4351,23 +4361,15 @@ ptp_ocp_complete(struct ptp_ocp *bp) { struct pps_device *pps; char buf[32]; + int i; - if (bp->gnss_port.line != -1) { - sprintf(buf, "ttyS%d", bp->gnss_port.line); - ptp_ocp_link_child(bp, buf, "ttyGNSS"); - } - if (bp->gnss2_port.line != -1) { - sprintf(buf, "ttyS%d", bp->gnss2_port.line); - ptp_ocp_link_child(bp, buf, "ttyGNSS2"); - } - if (bp->mac_port.line != -1) { - sprintf(buf, "ttyS%d", bp->mac_port.line); - ptp_ocp_link_child(bp, buf, "ttyMAC"); - } - if (bp->nmea_port.line != -1) { - sprintf(buf, "ttyS%d", bp->nmea_port.line); - ptp_ocp_link_child(bp, buf, "ttyNMEA"); + for (i = 0; i < __PORT_COUNT; i++) { + if (bp->port[i].line != -1) { + sprintf(buf, "ttyS%d", bp->port[i].line); + 
ptp_ocp_link_child(bp, buf, ptp_ocp_tty_port_name(i)); + } } + sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp)); ptp_ocp_link_child(bp, buf, "ptp"); @@ -4416,23 +4418,20 @@ ptp_ocp_info(struct ptp_ocp *bp) }; struct device *dev = &bp->pdev->dev; u32 reg; + int i; ptp_ocp_phc_info(bp); - ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port.line, - bp->gnss_port.baud); - ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port.line, - bp->gnss2_port.baud); - ptp_ocp_serial_info(dev, "MAC", bp->mac_port.line, bp->mac_port.baud); - if (bp->nmea_out && bp->nmea_port.line != -1) { - bp->nmea_port.baud = -1; + for (i = 0; i < __PORT_COUNT; i++) { + if (i == PORT_NMEA && bp->nmea_out && bp->port[PORT_NMEA].line != -1) { + bp->port[PORT_NMEA].baud = -1; - reg = ioread32(&bp->nmea_out->uart_baud); - if (reg < ARRAY_SIZE(nmea_baud)) - bp->nmea_port.baud = nmea_baud[reg]; - - ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port.line, - bp->nmea_port.baud); + reg = ioread32(&bp->nmea_out->uart_baud); + if (reg < ARRAY_SIZE(nmea_baud)) + bp->port[PORT_NMEA].baud = nmea_baud[reg]; + } + ptp_ocp_serial_info(dev, ptp_ocp_tty_port_name(i), bp->port[i].line, + bp->port[i].baud); } } @@ -4473,14 +4472,9 @@ ptp_ocp_detach(struct ptp_ocp *bp) for (i = 0; i < 4; i++) if (bp->signal_out[i]) ptp_ocp_unregister_ext(bp->signal_out[i]); - if (bp->gnss_port.line != -1) - serial8250_unregister_port(bp->gnss_port.line); - if (bp->gnss2_port.line != -1) - serial8250_unregister_port(bp->gnss2_port.line); - if (bp->mac_port.line != -1) - serial8250_unregister_port(bp->mac_port.line); - if (bp->nmea_port.line != -1) - serial8250_unregister_port(bp->nmea_port.line); + for (i = 0; i < __PORT_COUNT; i++) + if (bp->port[i].line != -1) + serial8250_unregister_port(bp->port[i].line); platform_device_unregister(bp->spi_flash); platform_device_unregister(bp->i2c_ctrl); if (bp->i2c_clk) From ced0328530806ffeb2e1ed1129cc3a43e1493e02 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 29 Aug 2024 11:36:02 -0700 Subject: [PATCH 179/374] ptp: ocp: adjust sysfs entries to expose tty information [ Upstream commit 82ace0c8fe9b025eaa273365e27057402cdaeb02 ] Implement additional attribute group to expose serial port information. Fixes tag points to the commit which introduced the change in serial port subsystem and made it impossible to use symlinks. 
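For reference, userspace can now obtain the serial port names from the new "tty" attribute group instead of following symlinks; assuming the usual timecard class device path (device index and line number below are only illustrative):

  $ cat /sys/class/timecard/ocp0/tty/ttyGNSS
  ttyS6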
Fixes: b286f4e87e32 ("serial: core: Move tty and serdev to be children of serial core port device") Signed-off-by: Vadim Fedorenko Reviewed-by: Greg Kroah-Hartman Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/ptp/ptp_ocp.c | 62 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 11 deletions(-) diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 46369de8e30b..e7479b9b90cb 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -3361,6 +3361,54 @@ static EXT_ATTR_RO(freq, frequency, 1); static EXT_ATTR_RO(freq, frequency, 2); static EXT_ATTR_RO(freq, frequency, 3); +static ssize_t +ptp_ocp_tty_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + + return sysfs_emit(buf, "ttyS%d", bp->port[(uintptr_t)ea->var].line); +} + +static umode_t +ptp_ocp_timecard_tty_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj)); + struct ptp_ocp_serial_port *port; + struct device_attribute *dattr; + struct dev_ext_attribute *ea; + + if (strncmp(attr->name, "tty", 3)) + return attr->mode; + + dattr = container_of(attr, struct device_attribute, attr); + ea = container_of(dattr, struct dev_ext_attribute, attr); + port = &bp->port[(uintptr_t)ea->var]; + return port->line == -1 ? 0 : 0444; +} + +#define EXT_TTY_ATTR_RO(_name, _val) \ + struct dev_ext_attribute dev_attr_tty##_name = \ + { __ATTR(tty##_name, 0444, ptp_ocp_tty_show, NULL), (void *)_val } + +static EXT_TTY_ATTR_RO(GNSS, PORT_GNSS); +static EXT_TTY_ATTR_RO(GNSS2, PORT_GNSS2); +static EXT_TTY_ATTR_RO(MAC, PORT_MAC); +static EXT_TTY_ATTR_RO(NMEA, PORT_NMEA); +static struct attribute *ptp_ocp_timecard_tty_attrs[] = { + &dev_attr_ttyGNSS.attr.attr, + &dev_attr_ttyGNSS2.attr.attr, + &dev_attr_ttyMAC.attr.attr, + &dev_attr_ttyNMEA.attr.attr, + NULL, +}; + +static const struct attribute_group ptp_ocp_timecard_tty_group = { + .name = "tty", + .attrs = ptp_ocp_timecard_tty_attrs, + .is_visible = ptp_ocp_timecard_tty_is_visible, +}; + static ssize_t serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -3790,6 +3838,7 @@ static const struct attribute_group fb_timecard_group = { static const struct ocp_attr_group fb_timecard_groups[] = { { .cap = OCP_CAP_BASIC, .group = &fb_timecard_group }, + { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal2_group }, @@ -3829,6 +3878,7 @@ static const struct attribute_group art_timecard_group = { static const struct ocp_attr_group art_timecard_groups[] = { { .cap = OCP_CAP_BASIC, .group = &art_timecard_group }, + { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group }, { }, }; @@ -3856,6 +3906,7 @@ static const struct attribute_group adva_timecard_group = { static const struct ocp_attr_group adva_timecard_groups[] = { { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group }, + { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group }, { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group }, @@ -4361,14 +4412,6 @@ ptp_ocp_complete(struct ptp_ocp *bp) { struct pps_device *pps; char buf[32]; - int i; - - for (i = 0; i < 
__PORT_COUNT; i++) { - if (bp->port[i].line != -1) { - sprintf(buf, "ttyS%d", bp->port[i].line); - ptp_ocp_link_child(bp, buf, ptp_ocp_tty_port_name(i)); - } - } sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp)); ptp_ocp_link_child(bp, buf, "ptp"); @@ -4440,9 +4483,6 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp) { struct device *dev = &bp->dev; - sysfs_remove_link(&dev->kobj, "ttyGNSS"); - sysfs_remove_link(&dev->kobj, "ttyGNSS2"); - sysfs_remove_link(&dev->kobj, "ttyMAC"); sysfs_remove_link(&dev->kobj, "ptp"); sysfs_remove_link(&dev->kobj, "pps"); } From 2285c2faef19ee08a6bd6754f4c3ec07dceb2889 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 23 Aug 2024 11:59:26 +0200 Subject: [PATCH 180/374] ice: move netif_queue_set_napi to rtnl-protected sections [ Upstream commit 2a5dc090b92cfa5270e20056074241c6db5c9cdd ] Currently, netif_queue_set_napi() is called from ice_vsi_rebuild() that is not rtnl-locked when called from the reset. This creates the need to take the rtnl_lock just for a single function and complicates the synchronization with .ndo_bpf. At the same time, there no actual need to fill napi-to-queue information at this exact point. Fill napi-to-queue information when opening the VSI and clear it when the VSI is being closed. Those routines are already rtnl-locked. Also, rewrite napi-to-queue assignment in a way that prevents inclusion of XDP queues, as this leads to out-of-bounds writes, such as one below. [ +0.000004] BUG: KASAN: slab-out-of-bounds in netif_queue_set_napi+0x1c2/0x1e0 [ +0.000012] Write of size 8 at addr ffff889881727c80 by task bash/7047 [ +0.000006] CPU: 24 PID: 7047 Comm: bash Not tainted 6.10.0-rc2+ #2 [ +0.000004] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0014.082620210524 08/26/2021 [ +0.000003] Call Trace: [ +0.000003] [ +0.000002] dump_stack_lvl+0x60/0x80 [ +0.000007] print_report+0xce/0x630 [ +0.000007] ? __pfx__raw_spin_lock_irqsave+0x10/0x10 [ +0.000007] ? __virt_addr_valid+0x1c9/0x2c0 [ +0.000005] ? netif_queue_set_napi+0x1c2/0x1e0 [ +0.000003] kasan_report+0xe9/0x120 [ +0.000004] ? netif_queue_set_napi+0x1c2/0x1e0 [ +0.000004] netif_queue_set_napi+0x1c2/0x1e0 [ +0.000005] ice_vsi_close+0x161/0x670 [ice] [ +0.000114] ice_dis_vsi+0x22f/0x270 [ice] [ +0.000095] ice_pf_dis_all_vsi.constprop.0+0xae/0x1c0 [ice] [ +0.000086] ice_prepare_for_reset+0x299/0x750 [ice] [ +0.000087] pci_dev_save_and_disable+0x82/0xd0 [ +0.000006] pci_reset_function+0x12d/0x230 [ +0.000004] reset_store+0xa0/0x100 [ +0.000006] ? __pfx_reset_store+0x10/0x10 [ +0.000002] ? __pfx_mutex_lock+0x10/0x10 [ +0.000004] ? __check_object_size+0x4c1/0x640 [ +0.000007] kernfs_fop_write_iter+0x30b/0x4a0 [ +0.000006] vfs_write+0x5d6/0xdf0 [ +0.000005] ? fd_install+0x180/0x350 [ +0.000005] ? __pfx_vfs_write+0x10/0xA10 [ +0.000004] ? do_fcntl+0x52c/0xcd0 [ +0.000004] ? kasan_save_track+0x13/0x60 [ +0.000003] ? kasan_save_free_info+0x37/0x60 [ +0.000006] ksys_write+0xfa/0x1d0 [ +0.000003] ? __pfx_ksys_write+0x10/0x10 [ +0.000002] ? __x64_sys_fcntl+0x121/0x180 [ +0.000004] ? _raw_spin_lock+0x87/0xe0 [ +0.000005] do_syscall_64+0x80/0x170 [ +0.000007] ? _raw_spin_lock+0x87/0xe0 [ +0.000004] ? __pfx__raw_spin_lock+0x10/0x10 [ +0.000003] ? file_close_fd_locked+0x167/0x230 [ +0.000005] ? syscall_exit_to_user_mode+0x7d/0x220 [ +0.000005] ? do_syscall_64+0x8c/0x170 [ +0.000004] ? do_syscall_64+0x8c/0x170 [ +0.000003] ? do_syscall_64+0x8c/0x170 [ +0.000003] ? fput+0x1a/0x2c0 [ +0.000004] ? filp_close+0x19/0x30 [ +0.000004] ? do_dup2+0x25a/0x4c0 [ +0.000004] ? 
__x64_sys_dup2+0x6e/0x2e0 [ +0.000002] ? syscall_exit_to_user_mode+0x7d/0x220 [ +0.000004] ? do_syscall_64+0x8c/0x170 [ +0.000003] ? __count_memcg_events+0x113/0x380 [ +0.000005] ? handle_mm_fault+0x136/0x820 [ +0.000005] ? do_user_addr_fault+0x444/0xa80 [ +0.000004] ? clear_bhb_loop+0x25/0x80 [ +0.000004] ? clear_bhb_loop+0x25/0x80 [ +0.000002] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ +0.000005] RIP: 0033:0x7f2033593154 Fixes: 080b0c8d6d26 ("ice: Fix ASSERT_RTNL() warning during certain scenarios") Fixes: 91fdbce7e8d6 ("ice: Add support in the driver for associating queue with napi") Reviewed-by: Wojciech Drewek Reviewed-by: Jacob Keller Reviewed-by: Amritha Nambiar Signed-off-by: Larysa Zaremba Reviewed-by: Maciej Fijalkowski Tested-by: George Kuruvinakunnel Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_base.c | 11 +- drivers/net/ethernet/intel/ice/ice_lib.c | 129 ++++++---------------- drivers/net/ethernet/intel/ice/ice_lib.h | 10 +- drivers/net/ethernet/intel/ice/ice_main.c | 17 ++- 4 files changed, 49 insertions(+), 118 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index f448d3a84564..c158749a80e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -190,16 +190,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_tx_ring(tx_ring, q_vector->tx) { - ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, - NULL); + ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx) tx_ring->q_vector = NULL; - } - ice_for_each_rx_ring(rx_ring, q_vector->rx) { - ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, - NULL); + + ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx) rx_ring->q_vector = NULL; - } /* only VSI with an associated netdev is set up with NAPI */ if (vsi->netdev) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7629b0190578..cdf05e57499f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2286,9 +2286,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) ice_vsi_map_rings_to_vectors(vsi); - /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi); - vsi->stat_offsets_loaded = false; /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ @@ -2628,6 +2625,7 @@ void ice_vsi_close(struct ice_vsi *vsi) if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) ice_down(vsi); + ice_vsi_clear_napi_queues(vsi); ice_vsi_free_irq(vsi); ice_vsi_free_tx_rings(vsi); ice_vsi_free_rx_rings(vsi); @@ -2694,120 +2692,55 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } /** - * __ice_queue_set_napi - Set the napi instance for the queue - * @dev: device to which NAPI and queue belong - * @queue_index: Index of queue - * @type: queue type as RX or TX - * @napi: NAPI context - * @locked: is the rtnl_lock already held - * - * Set the napi instance for the queue. Caller indicates the lock status. 
- */ -static void -__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi, - bool locked) -{ - if (!locked) - rtnl_lock(); - netif_queue_set_napi(dev, queue_index, type, napi); - if (!locked) - rtnl_unlock(); -} - -/** - * ice_queue_set_napi - Set the napi instance for the queue - * @vsi: VSI being configured - * @queue_index: Index of queue - * @type: queue type as RX or TX - * @napi: NAPI context + * ice_vsi_set_napi_queues - associate netdev queues with napi + * @vsi: VSI pointer * - * Set the napi instance for the queue. The rtnl lock state is derived from the - * execution path. + * Associate queue[s] with napi for all vectors. + * The caller must hold rtnl_lock. */ -void -ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi) +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; + struct net_device *netdev = vsi->netdev; + int q_idx, v_idx; - if (!vsi->netdev) + if (!netdev) return; - if (current_work() == &pf->serv_task || - test_bit(ICE_PREPARED_FOR_RESET, pf->state) || - test_bit(ICE_DOWN, pf->state) || - test_bit(ICE_SUSPENDED, pf->state)) - __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, - false); - else - __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, - true); -} + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, + &vsi->rx_rings[q_idx]->q_vector->napi); -/** - * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi - * @q_vector: q_vector pointer - * @locked: is the rtnl_lock already held - * - * Associate the q_vector napi with all the queue[s] on the vector. - * Caller indicates the lock status. 
- */ -void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) -{ - struct ice_rx_ring *rx_ring; - struct ice_tx_ring *tx_ring; - - ice_for_each_rx_ring(rx_ring, q_vector->rx) - __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi, - locked); - - ice_for_each_tx_ring(tx_ring, q_vector->tx) - __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi, - locked); + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, + &vsi->tx_rings[q_idx]->q_vector->napi); /* Also set the interrupt number for the NAPI */ - netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); -} + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; -/** - * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi - * @q_vector: q_vector pointer - * - * Associate the q_vector napi with all the queue[s] on the vector - */ -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) -{ - struct ice_rx_ring *rx_ring; - struct ice_tx_ring *tx_ring; - - ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi); - - ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi); - /* Also set the interrupt number for the NAPI */ - netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); + } } /** - * ice_vsi_set_napi_queues + * ice_vsi_clear_napi_queues - dissociate netdev queues from napi * @vsi: VSI pointer * - * Associate queue[s] with napi for all vectors + * Clear the association between all VSI queues queue[s] and napi. + * The caller must hold rtnl_lock. 
*/ -void ice_vsi_set_napi_queues(struct ice_vsi *vsi) +void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { - int i; + struct net_device *netdev = vsi->netdev; + int q_idx; - if (!vsi->netdev) + if (!netdev) return; - ice_for_each_q_vector(vsi, i) - ice_q_vector_set_napi_queues(vsi->q_vectors[i]); + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); + + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 94ce8964dda6..36d86535695d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -44,16 +44,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void -ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi); - -void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); - -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); - void ice_vsi_set_napi_queues(struct ice_vsi *vsi); +void ice_vsi_clear_napi_queues(struct ice_vsi *vsi); + int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 209bfd70c430..0e0086494a54 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3559,11 +3559,9 @@ static void ice_napi_add(struct ice_vsi *vsi) if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, v_idx) { + ice_for_each_q_vector(vsi, v_idx) netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); - __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); - } } /** @@ -5541,7 +5539,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + rtnl_lock(); ice_vsi_set_napi_queues(pf->vsi[v]); + rtnl_unlock(); } ret = ice_req_irq_msix_misc(pf); @@ -5555,8 +5555,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) err_reinit: while (v--) - if (pf->vsi[v]) + if (pf->vsi[v]) { + rtnl_lock(); + ice_vsi_clear_napi_queues(pf->vsi[v]); + rtnl_unlock(); ice_vsi_free_q_vectors(pf->vsi[v]); + } return ret; } @@ -5621,6 +5625,9 @@ static int ice_suspend(struct device *dev) ice_for_each_vsi(pf, v) { if (!pf->vsi[v]) continue; + rtnl_lock(); + ice_vsi_clear_napi_queues(pf->vsi[v]); + rtnl_unlock(); ice_vsi_free_q_vectors(pf->vsi[v]); } ice_clear_interrupt_scheme(pf); @@ -7456,6 +7463,8 @@ int ice_vsi_open(struct ice_vsi *vsi) err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); if (err) goto err_set_qs; + + ice_vsi_set_napi_queues(vsi); } err = ice_up_complete(vsi); From 391f7dae3d836891fc6cfbde38add2d0e10c6b7f Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 23 Aug 2024 11:59:27 +0200 Subject: [PATCH 181/374] ice: protect XDP configuration with a mutex [ Upstream commit 2504b8405768a57a71e660dbfd5abd59f679a03f ] The main threat to data consistency in ice_xdp() is a possible asynchronous PF reset. It can be triggered by a user or by TX timeout handler. 
XDP setup and PF reset code access the same resources in the following sections: * ice_vsi_close() in ice_prepare_for_reset() - already rtnl-locked * ice_vsi_rebuild() for the PF VSI - not protected * ice_vsi_open() - already rtnl-locked With an unfortunate timing, such accesses can result in a crash such as the one below: [ +1.999878] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 14 [ +2.002992] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 18 [Mar15 18:17] ice 0000:b1:00.0 ens801f0np0: NETDEV WATCHDOG: CPU: 38: transmit queue 14 timed out 80692736 ms [ +0.000093] ice 0000:b1:00.0 ens801f0np0: tx_timeout: VSI_num: 6, Q 14, NTC: 0x0, HW_HEAD: 0x0, NTU: 0x0, INT: 0x4000001 [ +0.000012] ice 0000:b1:00.0 ens801f0np0: tx_timeout recovery level 1, txqueue 14 [ +0.394718] ice 0000:b1:00.0: PTP reset successful [ +0.006184] BUG: kernel NULL pointer dereference, address: 0000000000000098 [ +0.000045] #PF: supervisor read access in kernel mode [ +0.000023] #PF: error_code(0x0000) - not-present page [ +0.000023] PGD 0 P4D 0 [ +0.000018] Oops: 0000 [#1] PREEMPT SMP NOPTI [ +0.000023] CPU: 38 PID: 7540 Comm: kworker/38:1 Not tainted 6.8.0-rc7 #1 [ +0.000031] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0014.082620210524 08/26/2021 [ +0.000036] Workqueue: ice ice_service_task [ice] [ +0.000183] RIP: 0010:ice_clean_tx_ring+0xa/0xd0 [ice] [...] [ +0.000013] Call Trace: [ +0.000016] [ +0.000014] ? __die+0x1f/0x70 [ +0.000029] ? page_fault_oops+0x171/0x4f0 [ +0.000029] ? schedule+0x3b/0xd0 [ +0.000027] ? exc_page_fault+0x7b/0x180 [ +0.000022] ? asm_exc_page_fault+0x22/0x30 [ +0.000031] ? ice_clean_tx_ring+0xa/0xd0 [ice] [ +0.000194] ice_free_tx_ring+0xe/0x60 [ice] [ +0.000186] ice_destroy_xdp_rings+0x157/0x310 [ice] [ +0.000151] ice_vsi_decfg+0x53/0xe0 [ice] [ +0.000180] ice_vsi_rebuild+0x239/0x540 [ice] [ +0.000186] ice_vsi_rebuild_by_type+0x76/0x180 [ice] [ +0.000145] ice_rebuild+0x18c/0x840 [ice] [ +0.000145] ? delay_tsc+0x4a/0xc0 [ +0.000022] ? delay_tsc+0x92/0xc0 [ +0.000020] ice_do_reset+0x140/0x180 [ice] [ +0.000886] ice_service_task+0x404/0x1030 [ice] [ +0.000824] process_one_work+0x171/0x340 [ +0.000685] worker_thread+0x277/0x3a0 [ +0.000675] ? preempt_count_add+0x6a/0xa0 [ +0.000677] ? _raw_spin_lock_irqsave+0x23/0x50 [ +0.000679] ? __pfx_worker_thread+0x10/0x10 [ +0.000653] kthread+0xf0/0x120 [ +0.000635] ? __pfx_kthread+0x10/0x10 [ +0.000616] ret_from_fork+0x2d/0x50 [ +0.000612] ? __pfx_kthread+0x10/0x10 [ +0.000604] ret_from_fork_asm+0x1b/0x30 [ +0.000604] The previous way of handling this through returning -EBUSY is not viable, particularly when destroying AF_XDP socket, because the kernel proceeds with removal anyway. There is plenty of code between those calls and there is no need to create a large critical section that covers all of them, same as there is no need to protect ice_vsi_rebuild() with rtnl_lock(). Add xdp_state_lock mutex to protect ice_vsi_rebuild() and ice_xdp(). Leaving unprotected sections in between would result in two states that have to be considered: 1. when the VSI is closed, but not yet rebuild 2. when VSI is already rebuild, but not yet open The latter case is actually already handled through !netif_running() case, we just need to adjust flag checking a little. The former one is not as trivial, because between ice_vsi_close() and ice_vsi_rebuild(), a lot of hardware interaction happens, this can make adding/deleting rings exit with an error. 
Luckily, VSI rebuild is pending and can apply new configuration for us in a managed fashion. Therefore, add an additional VSI state flag ICE_VSI_REBUILD_PENDING to indicate that ice_xdp() can just hot-swap the program. Also, as ice_vsi_rebuild() flow is touched in this patch, make it more consistent by deconfiguring VSI when coalesce allocation fails. Fixes: 2d4238f55697 ("ice: Add support for AF_XDP") Fixes: efc2214b6047 ("ice: Add support for XDP") Reviewed-by: Wojciech Drewek Reviewed-by: Jacob Keller Tested-by: Chandan Kumar Rout Signed-off-by: Larysa Zaremba Reviewed-by: Maciej Fijalkowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice.h | 2 ++ drivers/net/ethernet/intel/ice/ice_lib.c | 34 ++++++++++++++--------- drivers/net/ethernet/intel/ice/ice_main.c | 19 +++++++++---- drivers/net/ethernet/intel/ice/ice_xsk.c | 3 +- 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index caaa10157909..ce8b5505b16d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -318,6 +318,7 @@ enum ice_vsi_state { ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, ICE_VSI_PROMISC_CHANGED, + ICE_VSI_REBUILD_PENDING, ICE_VSI_STATE_NBITS /* must be last */ }; @@ -411,6 +412,7 @@ struct ice_vsi { struct ice_tx_ring **xdp_rings; /* XDP ring array */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct mutex xdp_state_lock; struct net_device **target_netdevs; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index cdf05e57499f..3e772c014ae3 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -447,6 +447,7 @@ static void ice_vsi_free(struct ice_vsi *vsi) ice_vsi_free_stats(vsi); ice_vsi_free_arrays(vsi); + mutex_destroy(&vsi->xdp_state_lock); mutex_unlock(&pf->sw_mutex); devm_kfree(dev, vsi); } @@ -626,6 +627,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, pf->next_vsi); + mutex_init(&vsi->xdp_state_lock); + unlock_pf: mutex_unlock(&pf->sw_mutex); return vsi; @@ -2972,19 +2975,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; + mutex_lock(&vsi->xdp_state_lock); + ret = ice_vsi_realloc_stat_arrays(vsi); if (ret) - goto err_vsi_cfg; + goto unlock; ice_vsi_decfg(vsi); ret = ice_vsi_cfg_def(vsi); if (ret) - goto err_vsi_cfg; + goto unlock; coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); - if (!coalesce) - return -ENOMEM; + if (!coalesce) { + ret = -ENOMEM; + goto decfg; + } prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); @@ -2992,22 +2999,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (ret) { if (vsi_flags & ICE_VSI_FLAG_INIT) { ret = -EIO; - goto err_vsi_cfg_tc_lan; + goto free_coalesce; } - kfree(coalesce); - return ice_schedule_reset(pf, ICE_RESET_PFR); + ret = ice_schedule_reset(pf, ICE_RESET_PFR); + goto free_coalesce; } ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); - kfree(coalesce); + clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); - return 0; - -err_vsi_cfg_tc_lan: - ice_vsi_decfg(vsi); +free_coalesce: kfree(coalesce); -err_vsi_cfg: +decfg: + if (ret) + ice_vsi_decfg(vsi); +unlock: + mutex_unlock(&vsi->xdp_state_lock); return ret; } diff 
--git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0e0086494a54..746cae5964fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -617,6 +617,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) /* clear SW filtering DB */ ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ + set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); ice_pf_dis_all_vsi(pf, false); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) @@ -3017,7 +3018,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } /* hot swap progs and avoid toggling link */ - if (ice_is_xdp_ena_vsi(vsi) == !!prog) { + if (ice_is_xdp_ena_vsi(vsi) == !!prog || + test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { ice_vsi_assign_bpf_prog(vsi, prog); return 0; } @@ -3089,21 +3091,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_vsi *vsi = np->vsi; + int ret; if (vsi->type != ICE_VSI_PF) { NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); return -EINVAL; } + mutex_lock(&vsi->xdp_state_lock); + switch (xdp->command) { case XDP_SETUP_PROG: - return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + break; case XDP_SETUP_XSK_POOL: - return ice_xsk_pool_setup(vsi, xdp->xsk.pool, - xdp->xsk.queue_id); + ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); + break; default: - return -EINVAL; + ret = -EINVAL; } + + mutex_unlock(&vsi->xdp_state_lock); + return ret; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 240a7bec242b..a659951fa987 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -390,7 +390,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) goto failure; } - if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); + if_running = !test_bit(ICE_VSI_DOWN, vsi->state) && + ice_is_xdp_ena_vsi(vsi); if (if_running) { struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; From 6ea67b4790a0466c7a3a091077e43a2d61e50f32 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 23 Aug 2024 11:59:29 +0200 Subject: [PATCH 182/374] ice: check ICE_VSI_DOWN under rtnl_lock when preparing for reset [ Upstream commit d8c40b9d3a6cef61eb5a0c58c34a3090ea938d89 ] Consider the following scenario: .ndo_bpf() | ice_prepare_for_reset() | ________________________|_______________________________________| rtnl_lock() | | ice_down() | | | test_bit(ICE_VSI_DOWN) - true | | ice_dis_vsi() returns | ice_up() | | | proceeds to rebuild a running VSI | .ndo_bpf() is not the only rtnl-locked callback that toggles the interface to apply new configuration. Another example is .set_channels(). To avoid the race condition above, act only after reading ICE_VSI_DOWN under rtnl_lock. 
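In short, the fix moves the decisive test under rtnl_lock: the DOWN bit is re-read only once the lock is held, so a concurrent rtnl-locked ice_up()/ice_down() cycle can no longer invalidate it. A minimal sketch of the resulting flow in ice_dis_vsi(), condensed from the diff below:

  set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

  if (!locked)
          rtnl_lock();
  /* re-check the state now that no rtnl-locked path can race with us */
  already_down = test_bit(ICE_VSI_DOWN, vsi->state);
  if (!already_down)
          ice_vsi_close(vsi);
  if (!locked)
          rtnl_unlock();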
Fixes: 0f9d5027a749 ("ice: Refactor VSI allocation, deletion and rebuild flow") Reviewed-by: Wojciech Drewek Reviewed-by: Jacob Keller Tested-by: Chandan Kumar Rout Signed-off-by: Larysa Zaremba Reviewed-by: Maciej Fijalkowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_lib.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 3e772c014ae3..7076a7738864 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2672,8 +2672,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) { - if (test_bit(ICE_VSI_DOWN, vsi->state)) - return; + bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); @@ -2681,15 +2680,16 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (netif_running(vsi->netdev)) { if (!locked) rtnl_lock(); - - ice_vsi_close(vsi); + already_down = test_bit(ICE_VSI_DOWN, vsi->state); + if (!already_down) + ice_vsi_close(vsi); if (!locked) rtnl_unlock(); - } else { + } else if (!already_down) { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL && !already_down) { ice_vsi_close(vsi); } } From 041cbd1feb43aaa5e99944ef5afe0143735010f2 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 23 Aug 2024 11:59:30 +0200 Subject: [PATCH 183/374] ice: remove ICE_CFG_BUSY locking from AF_XDP code [ Upstream commit 7e3b407ccbea3259b8583ccc34807622025e390f ] Locking used in ice_qp_ena() and ice_qp_dis() does pretty much nothing, because ICE_CFG_BUSY is a state flag that is supposed to be set in a PF state, not VSI one. Therefore it does not protect the queue pair from e.g. reset. Remove ICE_CFG_BUSY locking from ice_qp_dis() and ice_qp_ena(). 
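The reason the removed construct never provided any protection (illustrative, not actual driver code): ICE_CFG_BUSY is meant to live in the PF state bitmap, so a reset marking the PF busy and ice_qp_dis() polling the VSI bitmap operate on two unrelated words and never contend:

  /* reset path marks the PF busy */
  set_bit(ICE_CFG_BUSY, pf->state);

  /* ice_qp_dis() before this patch polled a different bitmap */
  while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
          usleep_range(1000, 2000);       /* never sees the PF's bit */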
Fixes: 2d4238f55697 ("ice: Add support for AF_XDP") Reviewed-by: Wojciech Drewek Reviewed-by: Jacob Keller Tested-by: Chandan Kumar Rout Reviewed-by: Maciej Fijalkowski Signed-off-by: Larysa Zaremba Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_xsk.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index a659951fa987..87a5427570d7 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -165,7 +165,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) struct ice_q_vector *q_vector; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; - int timeout = 50; int fail = 0; int err; @@ -176,13 +175,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) rx_ring = vsi->rx_rings[q_idx]; q_vector = rx_ring->q_vector; - while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) { - timeout--; - if (!timeout) - return -EBUSY; - usleep_range(1000, 2000); - } - synchronize_net(); netif_carrier_off(vsi->netdev); netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); @@ -261,7 +253,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); netif_carrier_on(vsi->netdev); } - clear_bit(ICE_CFG_BUSY, vsi->state); return fail; } From fc110a08e83c335c3d84ddd1c26801c5ba085021 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 23 Aug 2024 11:59:31 +0200 Subject: [PATCH 184/374] ice: do not bring the VSI up, if it was down before the XDP setup [ Upstream commit 04c7e14e5b0b6227e7b00d7a96ca2f2426ab9171 ] After XDP configuration is completed, we bring the interface up unconditionally, regardless of its state before the call to .ndo_bpf(). Preserve the information whether the interface had to be brought down and later bring it up only in such case. 
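A condensed sketch of the intended flow (the real function carries more error handling):

  if_running = netif_running(vsi->netdev) &&
               !test_and_set_bit(ICE_VSI_DOWN, vsi->state);

  if (if_running)
          ret = ice_down(vsi);

  /* ... swap the XDP program and reconfigure rings ... */

  if (if_running)
          ret = ice_up(vsi);

Only an interface that this call actually took down is brought back up, so a device that was administratively down before the XDP setup stays down afterwards.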
Fixes: efc2214b6047 ("ice: Add support for XDP") Reviewed-by: Wojciech Drewek Reviewed-by: Jacob Keller Tested-by: Chandan Kumar Rout Acked-by: Maciej Fijalkowski Signed-off-by: Larysa Zaremba Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 746cae5964fa..766f9a466bc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3006,8 +3006,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, struct netlink_ext_ack *extack) { unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; - bool if_running = netif_running(vsi->netdev); int ret = 0, xdp_ring_err = 0; + bool if_running; if (prog && !prog->aux->xdp_has_frags) { if (frame_size > ice_max_xdp_frame_size(vsi)) { @@ -3024,8 +3024,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, return 0; } + if_running = netif_running(vsi->netdev) && + !test_and_set_bit(ICE_VSI_DOWN, vsi->state); + /* need to stop netdev while setting up the program for Rx rings */ - if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { + if (if_running) { ret = ice_down(vsi); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); From 720f1548fb13f54a4b8bc1ab42ccbe1a30b83f6d Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 29 Aug 2024 19:50:55 +0200 Subject: [PATCH 185/374] usbnet: modern method to get random MAC [ Upstream commit bab8eb0dd4cb995caa4a0529d5655531c2ec5e8e ] The driver generates a random MAC once on load and uses it over and over, including on two devices needing a random MAC at the same time. Jakub suggested revamping the driver to the modern API for setting a random MAC rather than fixing the old stuff. The bug is as old as the driver. Signed-off-by: Oliver Neukum Reviewed-by: Simon Horman Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Link: https://patch.msgid.link/20240829175201.670718-1-oneukum@suse.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/usb/usbnet.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 9fd516e8bb10..18eb5ba436df 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -61,9 +61,6 @@ /*-------------------------------------------------------------------------*/ -// randomly generated ethernet address -static u8 node_id [ETH_ALEN]; - /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param (msg_level, int, 0); @@ -1725,7 +1722,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->net = net; strscpy(net->name, "usb%d", sizeof(net->name)); - eth_hw_addr_set(net, node_id); /* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. 
@@ -1801,9 +1797,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) goto out4; } - /* let userspace know we have a random address */ - if (ether_addr_equal(net->dev_addr, node_id)) - net->addr_assign_type = NET_ADDR_RANDOM; + /* this flags the device for user space */ + if (!is_valid_ether_addr(net->dev_addr)) + eth_hw_addr_random(net); if ((dev->driver_info->flags & FLAG_WLAN) != 0) SET_NETDEV_DEVTYPE(net, &wlan_type); @@ -2211,7 +2207,6 @@ static int __init usbnet_init(void) BUILD_BUG_ON( sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data)); - eth_random_addr(node_id); return 0; } module_init(usbnet_init); From d860f25dee0e9b05154554d1d2746b0b73343b95 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Mon, 2 Sep 2024 03:17:30 -0700 Subject: [PATCH 186/374] net: dqs: Do not use extern for unused dql_group [ Upstream commit 77461c10819103eaee7b33c744174b32a8c78b40 ] When CONFIG_DQL is not enabled, dql_group should be treated as a dead declaration. However, its current extern declaration assumes the linker will ignore it, which is generally true across most compiler and architecture combinations. But in certain cases, the linker still attempts to resolve the extern struct, even when the associated code is dead, resulting in a linking error. For instance the following error in loongarch64: >> loongarch64-linux-ld: net-sysfs.c:(.text+0x589c): undefined reference to `dql_group' Modify the declaration of the dead object to be an empty declaration instead of an extern. This change will prevent the linker from attempting to resolve an undefined reference. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202409012047.eCaOdfQJ-lkp@intel.com/ Fixes: 74293ea1c4db ("net: sysfs: Do not create sysfs for non BQL device") Signed-off-by: Breno Leitao Reviewed-by: Simon Horman Tested-by: Simon Horman # build-tested Link: https://patch.msgid.link/20240902101734.3260455-1-leitao@debian.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/core/net-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index dc91921da4ea..15ad775ddd3c 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1524,7 +1524,7 @@ static const struct attribute_group dql_group = { }; #else /* Fake declaration, all the code using it should be dead */ -extern const struct attribute_group dql_group; +static const struct attribute_group dql_group = {}; #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS From e454476c44524024fd98e3fea1a488c8fca45bea Mon Sep 17 00:00:00 2001 From: Tze-nan Wu Date: Fri, 30 Aug 2024 16:25:17 +0800 Subject: [PATCH 187/374] bpf, net: Fix a potential race in do_sock_getsockopt() [ Upstream commit 33f339a1ba54e56bba57ee9a77c71e385ab4825c ] There's a potential race when `cgroup_bpf_enabled(CGROUP_GETSOCKOPT)` is false during the execution of `BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN`, but becomes true when `BPF_CGROUP_RUN_PROG_GETSOCKOPT` is called. This inconsistency can lead to `BPF_CGROUP_RUN_PROG_GETSOCKOPT` receiving an "-EFAULT" from `__cgroup_bpf_run_filter_getsockopt(max_optlen=0)`. Scenario shown as below: `process A` `process B` ----------- ------------ BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN enable CGROUP_GETSOCKOPT BPF_CGROUP_RUN_PROG_GETSOCKOPT (-EFAULT) To resolve this, remove the `BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN` macro and directly uses `copy_from_sockptr` to ensure that `max_optlen` is always set before `BPF_CGROUP_RUN_PROG_GETSOCKOPT` is invoked. 
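A minimal sketch of the fixed path in do_sock_getsockopt(), as in the diff below: max_optlen is filled in unconditionally for native callers, so whatever hook state BPF_CGROUP_RUN_PROG_GETSOCKOPT later observes, it never runs with a stale zero length.

  int max_optlen __maybe_unused = 0;

  if (!compat)
          copy_from_sockptr(&max_optlen, optlen, sizeof(int));

  /* ... level/optname dispatch; the cgroup BPF hook then sees a
   * max_optlen that was populated before it could be enabled ... */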
Fixes: 0d01da6afc54 ("bpf: implement getsockopt and setsockopt hooks") Co-developed-by: Yanghui Li Signed-off-by: Yanghui Li Co-developed-by: Cheng-Jui Wang Signed-off-by: Cheng-Jui Wang Signed-off-by: Tze-nan Wu Acked-by: Stanislav Fomichev Acked-by: Alexei Starovoitov Link: https://patch.msgid.link/20240830082518.23243-1-Tze-nan.Wu@mediatek.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/linux/bpf-cgroup.h | 9 --------- net/socket.c | 4 ++-- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index fb3c3e7181e6..ce91d9b2acb9 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -390,14 +390,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, __ret; \ }) -#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ -({ \ - int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ - copy_from_sockptr(&__ret, optlen, sizeof(int)); \ - __ret; \ -}) - #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ max_optlen, retval) \ ({ \ @@ -518,7 +510,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) -#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ optlen, max_optlen, retval) ({ retval; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \ diff --git a/net/socket.c b/net/socket.c index e416920e9399..b5a003974058 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2350,7 +2350,7 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level, int do_sock_getsockopt(struct socket *sock, bool compat, int level, int optname, sockptr_t optval, sockptr_t optlen) { - int max_optlen __maybe_unused; + int max_optlen __maybe_unused = 0; const struct proto_ops *ops; int err; @@ -2359,7 +2359,7 @@ int do_sock_getsockopt(struct socket *sock, bool compat, int level, return err; if (!compat) - max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + copy_from_sockptr(&max_optlen, optlen, sizeof(int)); ops = READ_ONCE(sock->ops); if (level == SOL_SOCKET) { From c8ffe2d4d37a05ce18c71b87421443c16f8475e5 Mon Sep 17 00:00:00 2001 From: Jeongjun Park Date: Sat, 31 Aug 2024 14:47:02 +0900 Subject: [PATCH 188/374] bpf: add check for invalid name in btf_name_valid_section() [ Upstream commit bb6705c3f93bed2af03d43691743d4c43e3c8e6f ] If the length of the name string is 1 and the value of name[0] is NULL byte, an OOB vulnerability occurs in btf_name_valid_section() and the return value is true, so the invalid name passes the check. To solve this, you need to check if the first position is NULL byte and if the first character is printable. 
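The previous code advanced src past the first byte before scanning, so a section name consisting of a single NUL byte (an empty string) was never examined and the scan read past the end of the string. The shape of the check after the fix, simplified from the diff below:

  if (!*src)
          return false;

  /* bounded scan: every byte of the name must be printable */
  src_limit = src + KSYM_NAME_LEN;
  while (*src && src < src_limit) {
          if (!isprint(*src))
                  return false;
          src++;
  }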
Suggested-by: Eduard Zingerman Fixes: bd70a8fb7ca4 ("bpf: Allow all printable characters in BTF DATASEC names") Signed-off-by: Jeongjun Park Link: https://lore.kernel.org/r/20240831054702.364455-1-aha310510@gmail.com Signed-off-by: Alexei Starovoitov Acked-by: Eduard Zingerman Signed-off-by: Sasha Levin --- kernel/bpf/btf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index fe360b5b211d..2f157ffbc67c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -817,9 +817,11 @@ static bool btf_name_valid_section(const struct btf *btf, u32 offset) const char *src = btf_str_by_offset(btf, offset); const char *src_limit; + if (!*src) + return false; + /* set a limit on identifier length */ src_limit = src + KSYM_NAME_LEN; - src++; while (*src && src < src_limit) { if (!isprint(*src)) return false; From 58571ffb784320cf9efe471e3fe8049541e6c66e Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 30 Aug 2024 17:31:07 +0200 Subject: [PATCH 189/374] bareudp: Fix device stats updates. [ Upstream commit 4963d2343af81f493519f9c3ea9f2169eaa7353a ] Bareudp devices update their stats concurrently. Therefore they need proper atomic increments. Fixes: 571912c69f0e ("net: UDP tunnel encapsulation module for tunnelling different protocols like MPLS, IP, NSH etc.") Signed-off-by: Guillaume Nault Reviewed-by: Willem de Bruijn Link: https://patch.msgid.link/04b7b9d0b480158eb3ab4366ec80aa2ab7e41fcb.1725031794.git.gnault@redhat.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/bareudp.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index d5c56ca91b77..7aca0544fb29 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -83,7 +83,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, sizeof(ipversion))) { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } ipversion >>= 4; @@ -93,7 +93,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } else if (ipversion == 6 && bareudp->multi_proto_mode) { proto = htons(ETH_P_IPV6); } else { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) { @@ -107,7 +107,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ipv4_is_multicast(tunnel_hdr->daddr)) { proto = htons(ETH_P_MPLS_MC); } else { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } } else { @@ -123,7 +123,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) (addr_type & IPV6_ADDR_MULTICAST)) { proto = htons(ETH_P_MPLS_MC); } else { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } } @@ -135,7 +135,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) proto, !net_eq(bareudp->net, dev_net(bareudp->dev)))) { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } @@ -143,7 +143,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0); if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; + DEV_STATS_INC(bareudp->dev, rx_dropped); goto drop; } skb_dst_set(skb, &tun_dst->dst); @@ -169,8 +169,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct 
sk_buff *skb) &((struct ipv6hdr *)oiph)->saddr); } if (err > 1) { - ++bareudp->dev->stats.rx_frame_errors; - ++bareudp->dev->stats.rx_errors; + DEV_STATS_INC(bareudp->dev, rx_frame_errors); + DEV_STATS_INC(bareudp->dev, rx_errors); goto drop; } } @@ -467,11 +467,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb); if (err == -ELOOP) - dev->stats.collisions++; + DEV_STATS_INC(dev, collisions); else if (err == -ENETUNREACH) - dev->stats.tx_carrier_errors++; + DEV_STATS_INC(dev, tx_carrier_errors); - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); return NETDEV_TX_OK; } From c46cd6aaca81040deaea3500ba75126963294bd9 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 2 Sep 2024 10:39:27 -0700 Subject: [PATCH 190/374] fou: Fix null-ptr-deref in GRO. [ Upstream commit 7e4196935069947d8b70b09c1660b67b067e75cb ] We observed a null-ptr-deref in fou_gro_receive() while shutting down a host. [0] The NULL pointer is sk->sk_user_data, and the offset 8 is of protocol in struct fou. When fou_release() is called due to netns dismantle or explicit tunnel teardown, udp_tunnel_sock_release() sets NULL to sk->sk_user_data. Then, the tunnel socket is destroyed after a single RCU grace period. So, in-flight udp4_gro_receive() could find the socket and execute the FOU GRO handler, where sk->sk_user_data could be NULL. Let's use rcu_dereference_sk_user_data() in fou_from_sock() and add NULL checks in FOU GRO handlers. [0]: BUG: kernel NULL pointer dereference, address: 0000000000000008 PF: supervisor read access in kernel mode PF: error_code(0x0000) - not-present page PGD 80000001032f4067 P4D 80000001032f4067 PUD 103240067 PMD 0 SMP PTI CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.10.216-204.855.amzn2.x86_64 #1 Hardware name: Amazon EC2 c5.large/, BIOS 1.0 10/16/2017 RIP: 0010:fou_gro_receive (net/ipv4/fou.c:233) [fou] Code: 41 5f c3 cc cc cc cc e8 e7 2e 69 f4 0f 1f 80 00 00 00 00 0f 1f 44 00 00 49 89 f8 41 54 48 89 f7 48 89 d6 49 8b 80 88 02 00 00 <0f> b6 48 08 0f b7 42 4a 66 25 fd fd 80 cc 02 66 89 42 4a 0f b6 42 RSP: 0018:ffffa330c0003d08 EFLAGS: 00010297 RAX: 0000000000000000 RBX: ffff93d9e3a6b900 RCX: 0000000000000010 RDX: ffff93d9e3a6b900 RSI: ffff93d9e3a6b900 RDI: ffff93dac2e24d08 RBP: ffff93d9e3a6b900 R08: ffff93dacbce6400 R09: 0000000000000002 R10: 0000000000000000 R11: ffffffffb5f369b0 R12: ffff93dacbce6400 R13: ffff93dac2e24d08 R14: 0000000000000000 R15: ffffffffb4edd1c0 FS: 0000000000000000(0000) GS:ffff93daee800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000008 CR3: 0000000102140001 CR4: 00000000007706f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? show_trace_log_lvl (arch/x86/kernel/dumpstack.c:259) ? __die_body.cold (arch/x86/kernel/dumpstack.c:478 arch/x86/kernel/dumpstack.c:420) ? no_context (arch/x86/mm/fault.c:752) ? exc_page_fault (arch/x86/include/asm/irqflags.h:49 arch/x86/include/asm/irqflags.h:89 arch/x86/mm/fault.c:1435 arch/x86/mm/fault.c:1483) ? asm_exc_page_fault (arch/x86/include/asm/idtentry.h:571) ? 
fou_gro_receive (net/ipv4/fou.c:233) [fou] udp_gro_receive (include/linux/netdevice.h:2552 net/ipv4/udp_offload.c:559) udp4_gro_receive (net/ipv4/udp_offload.c:604) inet_gro_receive (net/ipv4/af_inet.c:1549 (discriminator 7)) dev_gro_receive (net/core/dev.c:6035 (discriminator 4)) napi_gro_receive (net/core/dev.c:6170) ena_clean_rx_irq (drivers/amazon/net/ena/ena_netdev.c:1558) [ena] ena_io_poll (drivers/amazon/net/ena/ena_netdev.c:1742) [ena] napi_poll (net/core/dev.c:6847) net_rx_action (net/core/dev.c:6917) __do_softirq (arch/x86/include/asm/jump_label.h:25 include/linux/jump_label.h:200 include/trace/events/irq.h:142 kernel/softirq.c:299) asm_call_irq_on_stack (arch/x86/entry/entry_64.S:809) do_softirq_own_stack (arch/x86/include/asm/irq_stack.h:27 arch/x86/include/asm/irq_stack.h:77 arch/x86/kernel/irq_64.c:77) irq_exit_rcu (kernel/softirq.c:393 kernel/softirq.c:423 kernel/softirq.c:435) common_interrupt (arch/x86/kernel/irq.c:239) asm_common_interrupt (arch/x86/include/asm/idtentry.h:626) RIP: 0010:acpi_idle_do_entry (arch/x86/include/asm/irqflags.h:49 arch/x86/include/asm/irqflags.h:89 drivers/acpi/processor_idle.c:114 drivers/acpi/processor_idle.c:575) Code: 8b 15 d1 3c c4 02 ed c3 cc cc cc cc 65 48 8b 04 25 40 ef 01 00 48 8b 00 a8 08 75 eb 0f 1f 44 00 00 0f 00 2d d5 09 55 00 fb f4 c3 cc cc cc cc e9 be fc ff ff 66 66 2e 0f 1f 84 00 00 00 00 00 RSP: 0018:ffffffffb5603e58 EFLAGS: 00000246 RAX: 0000000000004000 RBX: ffff93dac0929c00 RCX: ffff93daee833900 RDX: ffff93daee800000 RSI: ffff93daee87dc00 RDI: ffff93daee87dc64 RBP: 0000000000000001 R08: ffffffffb5e7b6c0 R09: 0000000000000044 R10: ffff93daee831b04 R11: 00000000000001cd R12: 0000000000000001 R13: ffffffffb5e7b740 R14: 0000000000000001 R15: 0000000000000000 ? sched_clock_cpu (kernel/sched/clock.c:371) acpi_idle_enter (drivers/acpi/processor_idle.c:712 (discriminator 3)) cpuidle_enter_state (drivers/cpuidle/cpuidle.c:237) cpuidle_enter (drivers/cpuidle/cpuidle.c:353) cpuidle_idle_call (kernel/sched/idle.c:158 kernel/sched/idle.c:239) do_idle (kernel/sched/idle.c:302) cpu_startup_entry (kernel/sched/idle.c:395 (discriminator 1)) start_kernel (init/main.c:1048) secondary_startup_64_no_verify (arch/x86/kernel/head_64.S:310) Modules linked in: udp_diag tcp_diag inet_diag nft_nat ipip tunnel4 dummy fou ip_tunnel nft_masq nft_chain_nat nf_nat wireguard nft_ct curve25519_x86_64 libcurve25519_generic nf_conntrack libchacha20poly1305 nf_defrag_ipv6 nf_defrag_ipv4 nft_objref chacha_x86_64 nft_counter nf_tables nfnetlink poly1305_x86_64 ip6_udp_tunnel udp_tunnel libchacha crc32_pclmul ghash_clmulni_intel aesni_intel crypto_simd cryptd glue_helper mousedev psmouse button ena ptp pps_core crc32c_intel CR2: 0000000000000008 Fixes: d92283e338f6 ("fou: change to use UDP socket GRO") Reported-by: Alphonse Kurian Signed-off-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20240902173927.62706-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/fou_core.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c index 0abbc413e0fe..78b869b31492 100644 --- a/net/ipv4/fou_core.c +++ b/net/ipv4/fou_core.c @@ -50,7 +50,7 @@ struct fou_net { static inline struct fou *fou_from_sock(struct sock *sk) { - return sk->sk_user_data; + return rcu_dereference_sk_user_data(sk); } static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len) @@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk, struct 
sk_buff *skb) { const struct net_offload __rcu **offloads; - u8 proto = fou_from_sock(sk)->protocol; + struct fou *fou = fou_from_sock(sk); const struct net_offload *ops; struct sk_buff *pp = NULL; + u8 proto; + + if (!fou) + goto out; + + proto = fou->protocol; /* We can clear the encap_mark for FOU as we are essentially doing * one of two possible things. We are either adding an L4 tunnel @@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { const struct net_offload __rcu **offloads; - u8 proto = fou_from_sock(sk)->protocol; + struct fou *fou = fou_from_sock(sk); const struct net_offload *ops; - int err = -ENOSYS; + u8 proto; + int err; + + if (!fou) { + err = -ENOENT; + goto out; + } + + proto = fou->protocol; offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); - if (WARN_ON(!ops || !ops->callbacks.gro_complete)) + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) { + err = -ENOSYS; goto out; + } err = ops->callbacks.gro_complete(skb, nhoff); @@ -320,6 +336,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk, struct gro_remcsum grc; u8 proto; + if (!fou) + goto out; + skb_gro_remcsum_init(&grc); off = skb_gro_offset(skb); From 7a8d3a25f6b43cf5a7f067a6d19f7186dc710d84 Mon Sep 17 00:00:00 2001 From: Hayes Wang Date: Tue, 3 Sep 2024 14:33:33 +0800 Subject: [PATCH 191/374] r8152: fix the firmware doesn't work [ Upstream commit 8487b4af59d4d7feda4b119dc2d92c67ca25c27e ] generic_ocp_write() asks the parameter "size" must be 4 bytes align. Therefore, write the bp would fail, if the mac->bp_num is odd. Align the size to 4 for fixing it. The way may write an extra bp, but the rtl8152_is_fw_mac_ok() makes sure the value must be 0 for the bp whose index is more than mac->bp_num. That is, there is no influence for the firmware. Besides, I check the return value of generic_ocp_write() to make sure everything is correct. Fixes: e5c266a61186 ("r8152: set bp in bulk") Signed-off-by: Hayes Wang Link: https://patch.msgid.link/20240903063333.4502-1-hayeswang@realtek.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/usb/r8152.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 19df1cd9f072..51d5d4f0a8f9 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5177,14 +5177,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) data = (u8 *)mac; data += __le16_to_cpu(mac->fw_offset); - generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data, - type); + if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, + data, type) < 0) { + dev_err(&tp->intf->dev, "Write %s fw fail\n", + type ? "PLA" : "USB"); + return; + } ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), __le16_to_cpu(mac->bp_ba_value)); - generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, - __le16_to_cpu(mac->bp_num) << 1, mac->bp, type); + if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, + ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4), + mac->bp, type) < 0) { + dev_err(&tp->intf->dev, "Write %s bp fail\n", + type ? 
"PLA" : "USB"); + return; + } bp_en_addr = __le16_to_cpu(mac->bp_en_addr); if (bp_en_addr) From 7c1ac9e92797689e23c1d7c3f7fc202b08c1c87b Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Tue, 3 Sep 2024 10:19:57 +0200 Subject: [PATCH 192/374] net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN [ Upstream commit bee2ef946d3184e99077be526567d791c473036f ] When userspace wants to take over a fdb entry by setting it as EXTERN_LEARNED, we set both flags BR_FDB_ADDED_BY_EXT_LEARN and BR_FDB_ADDED_BY_USER in br_fdb_external_learn_add(). If the bridge updates the entry later because its port changed, we clear the BR_FDB_ADDED_BY_EXT_LEARN flag, but leave the BR_FDB_ADDED_BY_USER flag set. If userspace then wants to take over the entry again, br_fdb_external_learn_add() sees that BR_FDB_ADDED_BY_USER and skips setting the BR_FDB_ADDED_BY_EXT_LEARN flags, thus silently ignores the update. Fix this by always allowing to set BR_FDB_ADDED_BY_EXT_LEARN regardless if this was a user fdb entry or not. Fixes: 710ae7287737 ("net: bridge: Mark FDB entries that were added by user as such") Signed-off-by: Jonas Gorski Acked-by: Nikolay Aleksandrov Reviewed-by: Ido Schimmel Link: https://patch.msgid.link/20240903081958.29951-1-jonas.gorski@bisdn.de Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/bridge/br_fdb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index c77591e63841..ad7a42b505ef 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -1469,12 +1469,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, modified = true; } - if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { + if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { /* Refresh entry */ fdb->used = jiffies; - } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) { - /* Take over SW learned entry */ - set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); + } else { modified = true; } From b4c56ed51390eaa61a5157ba2233625a878cdbd9 Mon Sep 17 00:00:00 2001 From: Sean Anderson Date: Tue, 3 Sep 2024 13:51:41 -0400 Subject: [PATCH 193/374] net: xilinx: axienet: Fix race in axienet_stop [ Upstream commit 858430db28a5f5a11f8faa3a6fa805438e6f0851 ] axienet_dma_err_handler can race with axienet_stop in the following manner: CPU 1 CPU 2 ====================== ================== axienet_stop() napi_disable() axienet_dma_stop() axienet_dma_err_handler() napi_disable() axienet_dma_stop() axienet_dma_start() napi_enable() cancel_work_sync() free_irq() Fix this by setting a flag in axienet_stop telling axienet_dma_err_handler not to bother doing anything. I chose not to use disable_work_sync to allow for easier backporting. 
Signed-off-by: Sean Anderson Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver") Link: https://patch.msgid.link/20240903175141.4132898-1-sean.anderson@linux.dev Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/xilinx/xilinx_axienet.h | 3 +++ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 09c9f9787180..1223fcc1a8da 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -436,6 +436,8 @@ struct skbuf_dma_descriptor { * @tx_bytes: TX byte count for statistics * @tx_stat_sync: Synchronization object for TX stats * @dma_err_task: Work structure to process Axi DMA errors + * @stopping: Set when @dma_err_task shouldn't do anything because we are + * about to stop the device. * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number * @eth_irq: Ethernet core IRQ number @@ -507,6 +509,7 @@ struct axienet_local { struct u64_stats_sync tx_stat_sync; struct work_struct dma_err_task; + bool stopping; int tx_irq; int rx_irq; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 559c0d60d948..88d7bc2ea713 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1460,6 +1460,7 @@ static int axienet_init_legacy_dma(struct net_device *ndev) struct axienet_local *lp = netdev_priv(ndev); /* Enable worker thread for Axi DMA error handling */ + lp->stopping = false; INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); napi_enable(&lp->napi_rx); @@ -1580,6 +1581,9 @@ static int axienet_stop(struct net_device *ndev) dev_dbg(&ndev->dev, "axienet_close()\n"); if (!lp->use_dmaengine) { + WRITE_ONCE(lp->stopping, true); + flush_work(&lp->dma_err_task); + napi_disable(&lp->napi_tx); napi_disable(&lp->napi_rx); } @@ -2154,6 +2158,10 @@ static void axienet_dma_err_handler(struct work_struct *work) dma_err_task); struct net_device *ndev = lp->ndev; + /* Don't bother if we are going to stop anyway */ + if (READ_ONCE(lp->stopping)) + return; + napi_disable(&lp->napi_tx); napi_disable(&lp->napi_rx); From 5537694733431287e29c1687128e9fcc989cd740 Mon Sep 17 00:00:00 2001 From: Pawel Dembicki Date: Tue, 3 Sep 2024 22:33:41 +0200 Subject: [PATCH 194/374] net: dsa: vsc73xx: fix possible subblocks range of CAPT block [ Upstream commit 8e69c96df771ab469cec278edb47009351de4da6 ] CAPT block (CPU Capture Buffer) have 7 sublocks: 0-3, 4, 6, 7. Function 'vsc73xx_is_addr_valid' allows to use only block 0 at this moment. This patch fix it. 
Fixes: 05bd97fc559d ("net: dsa: Add Vitesse VSC73xx DSA router driver") Signed-off-by: Pawel Dembicki Reviewed-by: Florian Fainelli Link: https://patch.msgid.link/20240903203340.1518789-1-paweldembicki@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/dsa/vitesse-vsc73xx-core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index 56bb77dbd28a..cefddcf3cc6f 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -34,7 +34,7 @@ #define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */ #define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */ #define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */ -#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */ +#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */ #define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ #define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ @@ -370,13 +370,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock) break; case VSC73XX_BLOCK_MII: - case VSC73XX_BLOCK_CAPTURE: case VSC73XX_BLOCK_ARBITER: switch (subblock) { case 0 ... 1: return 1; } break; + case VSC73XX_BLOCK_CAPTURE: + switch (subblock) { + case 0 ... 4: + case 6 ... 7: + return 1; + } + break; } return 0; From c61c4a64d24654306219bf268d91840b4c78e6f2 Mon Sep 17 00:00:00 2001 From: Jamie Bainbridge Date: Wed, 4 Sep 2024 16:12:26 +1000 Subject: [PATCH 195/374] selftests: net: enable bind tests [ Upstream commit e4af74a53b7aa865e7fcc104630ebb7a9129b71f ] bind_wildcard is compiled but not run, bind_timewait is not compiled. These two tests complete in a very short time, use the test harness properly, and seem reasonable to enable. The author of the tests confirmed via email that these were intended to be run. Enable these two tests. 
Fixes: 13715acf8ab5 ("selftest: Add test for bind() conflicts.") Fixes: 2c042e8e54ef ("tcp: Add selftest for bind() and TIME_WAIT.") Signed-off-by: Jamie Bainbridge Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/5a009b26cf5fb1ad1512d89c61b37e2fac702323.1725430322.git.jamie.bainbridge@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- tools/testing/selftests/net/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index d9393569d03a..ec5377ffda31 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -84,7 +84,8 @@ TEST_GEN_PROGS += so_incoming_cpu TEST_PROGS += sctp_vrf.sh TEST_GEN_FILES += sctp_hello TEST_GEN_FILES += ip_local_port_range -TEST_GEN_FILES += bind_wildcard +TEST_GEN_PROGS += bind_wildcard +TEST_GEN_PROGS += bind_timewait TEST_PROGS += test_vxlan_mdb.sh TEST_PROGS += test_bridge_neigh_suppress.sh TEST_PROGS += test_vxlan_nolocalbypass.sh From 982e057ee188ab63df40d602c2eee7d62efcdda1 Mon Sep 17 00:00:00 2001 From: Arkadiusz Kubalewski Date: Wed, 4 Sep 2024 15:50:34 +0200 Subject: [PATCH 196/374] tools/net/ynl: fix cli.py --subscribe feature [ Upstream commit 6fda63c45fe8a0870226c13dcce1cc21b7c4d508 ] Execution of command: ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml / --subscribe "monitor" --sleep 10 fails with: File "/repo/./tools/net/ynl/cli.py", line 109, in main ynl.check_ntf() File "/repo/tools/net/ynl/lib/ynl.py", line 924, in check_ntf op = self.rsp_by_value[nl_msg.cmd()] KeyError: 19 Parsing Generic Netlink notification messages performs lookup for op in the message. The message was not yet decoded, and is not yet considered GenlMsg, thus msg.cmd() returns Generic Netlink family id (19) instead of proper notification command id (i.e.: DPLL_CMD_PIN_CHANGE_NTF=13). Allow the op to be obtained within NetlinkProtocol.decode(..) itself if the op was not passed to the decode function, thus allow parsing of Generic Netlink notifications without causing the failure. 
Suggested-by: Donald Hunter Link: https://lore.kernel.org/netdev/m2le0n5xpn.fsf@gmail.com/ Fixes: 0a966d606c68 ("tools/net/ynl: Fix extack decoding for directional ops") Signed-off-by: Arkadiusz Kubalewski Reviewed-by: Donald Hunter Link: https://patch.msgid.link/20240904135034.316033-1-arkadiusz.kubalewski@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- tools/net/ynl/lib/ynl.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index 35e666928119..ed7b6fff6999 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -388,6 +388,8 @@ def _decode(self, nl_msg): def decode(self, ynl, nl_msg, op): msg = self._decode(nl_msg) + if op is None: + op = ynl.rsp_by_value[msg.cmd()] fixed_header_size = ynl._struct_size(op.fixed_header) msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size) return msg @@ -919,8 +921,7 @@ def check_ntf(self): print("Netlink done while checking for ntf!?") continue - op = self.rsp_by_value[nl_msg.cmd()] - decoded = self.nlproto.decode(self, nl_msg, op) + decoded = self.nlproto.decode(self, nl_msg, None) if decoded.cmd() not in self.async_msg_ids: print("Unexpected msg id done while checking for ntf", decoded) continue @@ -978,7 +979,7 @@ def _ops(self, ops): if nl_msg.extack: self._decode_extack(req_msg, op, nl_msg.extack) else: - op = self.rsp_by_value[nl_msg.cmd()] + op = None req_flags = [] if nl_msg.error: From 112fd2f02b308564724b8e81006c254d20945c4b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 18 Jun 2024 15:12:29 +0530 Subject: [PATCH 197/374] xen: privcmd: Fix possible access to a freed kirqfd instance [ Upstream commit 611ff1b1ae989a7bcce3e2a8e132ee30e968c557 ] Nothing prevents simultaneous ioctl calls to privcmd_irqfd_assign() and privcmd_irqfd_deassign(). If that happens, it is possible that a kirqfd created and added to the irqfds_list by privcmd_irqfd_assign() may get removed by another thread executing privcmd_irqfd_deassign(), while the former is still using it after dropping the locks. This can lead to a situation where an already freed kirqfd instance may be accessed and cause kernel oops. Use SRCU locking to prevent the same, as is done for the KVM implementation for irqfds. 
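For reference, the SRCU pattern borrowed from KVM looks roughly like this (an illustrative sketch with placeholder names, not the privcmd code): the assign path holds the SRCU read lock across the window in which it still uses the object after publishing it, and the shutdown path calls synchronize_srcu() before freeing, so it cannot free the object out from under that window.

  #include <linux/srcu.h>
  #include <linux/slab.h>

  DEFINE_STATIC_SRCU(obj_srcu);

  struct obj {
          int data;
  };

  static void obj_use_after_publish(struct obj *o)
  {
          int idx = srcu_read_lock(&obj_srcu);

          /* o cannot be freed while we are inside this read-side section */
          o->data++;
          srcu_read_unlock(&obj_srcu, idx);
  }

  static void obj_teardown(struct obj *o)
  {
          synchronize_srcu(&obj_srcu);    /* wait for all read-side sections */
          kfree(o);
  }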
Reported-by: Al Viro Suggested-by: Paolo Bonzini Signed-off-by: Viresh Kumar Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/9e884af1f1f842eacbb7afc5672c8feb4dea7f3f.1718703669.git.viresh.kumar@linaro.org Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- drivers/xen/privcmd.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index c9c620e32fa8..39e726d7280e 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -846,6 +847,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, /* Irqfd support */ static struct workqueue_struct *irqfd_cleanup_wq; static DEFINE_SPINLOCK(irqfds_lock); +DEFINE_STATIC_SRCU(irqfds_srcu); static LIST_HEAD(irqfds_list); struct privcmd_kernel_irqfd { @@ -873,6 +875,9 @@ static void irqfd_shutdown(struct work_struct *work) container_of(work, struct privcmd_kernel_irqfd, shutdown); u64 cnt; + /* Make sure irqfd has been initialized in assign path */ + synchronize_srcu(&irqfds_srcu); + eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt); eventfd_ctx_put(kirqfd->eventfd); kfree(kirqfd); @@ -935,7 +940,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) __poll_t events; struct fd f; void *dm_op; - int ret; + int ret, idx; kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL); if (!kirqfd) @@ -981,6 +986,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) } } + idx = srcu_read_lock(&irqfds_srcu); list_add_tail(&kirqfd->list, &irqfds_list); spin_unlock_irqrestore(&irqfds_lock, flags); @@ -992,6 +998,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) if (events & EPOLLIN) irqfd_inject(kirqfd); + srcu_read_unlock(&irqfds_srcu, idx); + /* * Do not drop the file until the kirqfd is fully initialized, otherwise * we might race against the EPOLLHUP. From 7f093820c0844c32219e4ee96128109f0f64ab8d Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Tue, 2 Jul 2024 12:08:09 +0100 Subject: [PATCH 198/374] firmware: cs_dsp: Don't allow writes to read-only controls [ Upstream commit 62412a9357b16a4e39dc582deb2e2a682b92524c ] Add a check to cs_dsp_coeff_write_ctrl() to abort if the control is not writeable. The cs_dsp code originated as an ASoC driver (wm_adsp) where all controls were exported as ALSA controls. It relied on ALSA to enforce the read-only permission. Now that the code has been separated from ALSA/ASoC it must perform its own permission check. This isn't currently causing any problems so there shouldn't be any need to backport this. If the client of cs_dsp exposes the control as an ALSA control, it should set permissions on that ALSA control to protect it. The few uses of cs_dsp_coeff_write_ctrl() inside drivers are for writable controls. 
Signed-off-by: Richard Fitzgerald Link: https://patch.msgid.link/20240702110809.16836-1-rf@opensource.cirrus.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/firmware/cirrus/cs_dsp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 8a347b938406..89fd63205a6e 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, lockdep_assert_held(&ctl->dsp->pwr_lock); + if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE)) + return -EPERM; + if (len + off * sizeof(u32) > ctl->len) return -EINVAL; From eb21d40a218905d9a187eed195025d5f6cc72fe2 Mon Sep 17 00:00:00 2001 From: Sean Anderson Date: Fri, 28 Jun 2024 16:55:39 -0400 Subject: [PATCH 199/374] phy: zynqmp: Take the phy mutex in xlate [ Upstream commit d79c6840917097285e03a49f709321f5fb972750 ] Take the phy mutex in xlate to protect against concurrent modification/access to gtr_phy. This does not typically cause any issues, since in most systems the phys are only xlated once and thereafter accessed with the phy API (which takes the locks). However, we are about to allow userspace to access phys for debugging, so it's important to avoid any data races. Signed-off-by: Sean Anderson Link: https://lore.kernel.org/r/20240628205540.3098010-5-sean.anderson@linux.dev Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/phy/xilinx/phy-zynqmp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index d7d12cf3011a..9cf0007cfd64 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -846,6 +846,7 @@ static struct phy *xpsgtr_xlate(struct device *dev, phy_type = args->args[1]; phy_instance = args->args[2]; + guard(mutex)(>r_phy->phy->mutex); ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance); if (ret < 0) { dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n"); From 85c9fdf75cd8e9dd8da27ffc9d2dddb90e300840 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Amadeusz=20S=C5=82awi=C5=84ski?= Date: Thu, 27 Jun 2024 12:18:40 +0200 Subject: [PATCH 200/374] ASoC: topology: Properly initialize soc_enum values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 8ec2a2643544ce352f012ad3d248163199d05dfc ] soc_tplg_denum_create_values() should properly set its values field. 
Signed-off-by: Amadeusz Sławiński Link: https://patch.msgid.link/20240627101850.2191513-4-amadeuszx.slawinski@linux.intel.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/soc-topology.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 6951ff7bc61e..73d44dff45d6 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -851,6 +851,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum * se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]); } + se->items = le32_to_cpu(ec->items); + se->values = (const unsigned int *)se->dobj.control.dvalues; return 0; } From aa4e90ed8c5dcb3690ef8e19849cc86478aea585 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 2 Jul 2024 12:13:24 +0200 Subject: [PATCH 201/374] dm init: Handle minors larger than 255 [ Upstream commit 140ce37fd78a629105377e17842465258a5459ef ] dm_parse_device_entry() simply copies the minor number into dmi.dev, but the dev_t format splits the minor number between the lowest 8 bytes and highest 12 bytes. If the minor number is larger than 255, part of it will end up getting treated as the major number Fix this by checking that the minor number is valid and then encoding it as a dev_t. Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka Signed-off-by: Sasha Levin --- drivers/md/dm-init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index 2a71bcdba92d..b37bbe762500 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c @@ -212,8 +212,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str) strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid)); /* minor */ if (strlen(field[2])) { - if (kstrtoull(field[2], 0, &dev->dmi.dev)) + if (kstrtoull(field[2], 0, &dev->dmi.dev) || + dev->dmi.dev >= (1 << MINORBITS)) return ERR_PTR(-EINVAL); + dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev); dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG; } /* flags */ From 15364a1ae1808324b753424987788e827f27a6e0 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Tue, 18 Jun 2024 16:46:37 +0800 Subject: [PATCH 202/374] cxl/region: Fix a race condition in memory hotplug notifier [ Upstream commit a3483ee7e6a7f2d12b5950246f4e0ef94f4a5df0 ] In the memory hotplug notifier function of the CXL region, cxl_region_perf_attrs_callback(), the node ID is obtained by checking the host address range of the region. However, the address range information is not available when the region is registered in devm_cxl_add_region(). Additionally, this information may be removed or added under the protection of cxl_region_rwsem during runtime. If the memory notifier is called for nodes other than that backed by the region, a race condition may occur, potentially leading to a NULL dereference or an invalid address range. The race condition is addressed by checking the availability of the address range information under the protection of cxl_region_rwsem. To enhance code readability and use guard(), the relevant code has been moved into a newly added function: cxl_region_nid(). 
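The guard() helper used by the new cxl_region_nid() is scope-based locking: the lock is taken where the guard is declared and released automatically on every return path. A minimal sketch of the idiom (illustrative names, not the CXL code):

  #include <linux/cleanup.h>
  #include <linux/rwsem.h>

  static DECLARE_RWSEM(demo_rwsem);
  static bool demo_valid;
  static int demo_value;

  static int demo_read(void)
  {
          guard(rwsem_read)(&demo_rwsem);         /* up_read() runs at any return below */

          if (!demo_valid)
                  return -1;                      /* lock released here as well */
          return demo_value;
  }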
Fixes: 067353a46d8c ("cxl/region: Add memory hotplug notifier for cxl region") Signed-off-by: Huang, Ying Cc: Dan Williams Cc: Alison Schofield Cc: Andrew Morton Cc: Jonathan Cameron Cc: Dave Jiang Cc: Bharata B Rao Cc: Alistair Popple Cc: Aneesh Kumar K.V Cc: Davidlohr Bueso Cc: Vishal Verma Cc: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://patch.msgid.link/20240618084639.1419629-2-ying.huang@intel.com Signed-off-by: Dave Jiang Signed-off-by: Sasha Levin --- drivers/cxl/core/region.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 538ebd5a64fd..cd9ccdc6bc81 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2386,14 +2386,25 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid) return true; } +static int cxl_region_nid(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled; + struct cxl_decoder *cxld; + + guard(rwsem_read)(&cxl_region_rwsem); + cxled = p->targets[0]; + if (!cxled) + return NUMA_NO_NODE; + cxld = &cxled->cxld; + return phys_to_target_node(cxld->hpa_range.start); +} + static int cxl_region_perf_attrs_callback(struct notifier_block *nb, unsigned long action, void *arg) { struct cxl_region *cxlr = container_of(nb, struct cxl_region, memory_notifier); - struct cxl_region_params *p = &cxlr->params; - struct cxl_endpoint_decoder *cxled = p->targets[0]; - struct cxl_decoder *cxld = &cxled->cxld; struct memory_notify *mnb = arg; int nid = mnb->status_change_nid; int region_nid; @@ -2401,7 +2412,7 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, if (nid == NUMA_NO_NODE || action != MEM_ONLINE) return NOTIFY_DONE; - region_nid = phys_to_target_node(cxld->hpa_range.start); + region_nid = cxl_region_nid(cxlr); if (nid != region_nid) return NOTIFY_DONE; From 81f8c43813712ec92132ddfdde8f3fe896a0a6ef Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Tue, 2 Jul 2024 21:08:33 +0800 Subject: [PATCH 203/374] iommu/vt-d: Handle volatile descriptor status read [ Upstream commit b5e86a95541cea737394a1da967df4cd4d8f7182 ] Queued invalidation wait descriptor status is volatile in that IOMMU hardware writes the data upon completion. Use READ_ONCE() to prevent compiler optimizations which ensures memory reads every time. As a side effect, READ_ONCE() also enforces strict types and may add an extra instruction. But it should not have negative performance impact since we use cpu_relax anyway and the extra time(by adding an instruction) may allow IOMMU HW request cacheline ownership easier. e.g. 
gcc 12.3 BEFORE: 81 38 ad de 00 00 cmpl $0x2,(%rax) AFTER (with READ_ONCE()) 772f: 8b 00 mov (%rax),%eax 7731: 3d ad de 00 00 cmp $0x2,%eax //status data is 32 bit Signed-off-by: Jacob Pan Reviewed-by: Kevin Tian Reviewed-by: Yi Liu Link: https://lore.kernel.org/r/20240607173817.3914600-1-jacob.jun.pan@linux.intel.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20240702130839.108139-2-baolu.lu@linux.intel.com Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- drivers/iommu/intel/dmar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 304e84949ca7..1c8d3141cb55 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1446,7 +1446,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, */ writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); - while (qi->desc_status[wait_index] != QI_DONE) { + while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) { /* * We will leave the interrupts disabled, to prevent interrupt * context to queue another cmd while a cmd is already submitted From 08205763198a7e04a1357b4c51ddeab80628da00 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 2 Jul 2024 21:08:35 +0800 Subject: [PATCH 204/374] iommu/vt-d: Remove control over Execute-Requested requests [ Upstream commit e995fcde6070f0981e083c1e2e17e401e6c17ad9 ] The VT-d specification has removed architectural support of the requests with pasid with a value of 1 for Execute-Requested (ER). And the NXE bit in the pasid table entry and XD bit in the first-stage paging Entries are deprecated accordingly. Remove the programming of these bits to make it consistent with the spec. Suggested-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240624032351.249858-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20240702130839.108139-4-baolu.lu@linux.intel.com Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- drivers/iommu/intel/iommu.c | 4 ++-- drivers/iommu/intel/iommu.h | 6 ++---- drivers/iommu/intel/pasid.c | 1 - drivers/iommu/intel/pasid.h | 10 ---------- 4 files changed, 4 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index f55ec1fd7942..e9bea0305c26 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -854,7 +854,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (domain->use_first_level) - pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; tmp = 0ULL; if (!try_cmpxchg64(&pte->val, &tmp, pteval)) @@ -1872,7 +1872,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); attr |= DMA_FL_PTE_PRESENT; if (domain->use_first_level) { - attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; if (prot & DMA_PTE_WRITE) attr |= DMA_FL_PTE_DIRTY; } diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index eaf015b4353b..9a3b064126de 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -49,7 +49,6 @@ #define DMA_FL_PTE_US BIT_ULL(2) #define DMA_FL_PTE_ACCESS BIT_ULL(5) #define DMA_FL_PTE_DIRTY BIT_ULL(6) -#define DMA_FL_PTE_XD BIT_ULL(63) #define 
DMA_SL_PTE_DIRTY_BIT 9 #define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) @@ -831,11 +830,10 @@ static inline void dma_clear_pte(struct dma_pte *pte) static inline u64 dma_pte_addr(struct dma_pte *pte) { #ifdef CONFIG_64BIT - return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD); + return pte->val & VTD_PAGE_MASK; #else /* Must have a full atomic 64-bit read */ - return __cmpxchg64(&pte->val, 0ULL, 0ULL) & - VTD_PAGE_MASK & (~DMA_FL_PTE_XD); + return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; #endif } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index abce19e2ad6f..aabcdf756581 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -333,7 +333,6 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, pasid_set_domain_id(pte, did); pasid_set_address_width(pte, iommu->agaw); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - pasid_set_nxe(pte); /* Setup Present and PASID Granular Transfer Type: */ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY); diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index da9978fef7ac..dde6d3ba5ae0 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -247,16 +247,6 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value) pasid_set_bits(&pe->val[1], 1 << 23, value << 23); } -/* - * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID - * entry. It is required when XD bit of the first level page table - * entry is about to be set. - */ -static inline void pasid_set_nxe(struct pasid_entry *pe) -{ - pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5); -} - /* * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode * PASID entry. From 64149da0fddbbfe43e11c0348d8c8b4171dae3a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 2 Jul 2024 17:10:21 +0200 Subject: [PATCH 205/374] block: don't call bio_uninit from bio_endio [ Upstream commit bf4c89fc8797f5c0964a0c3d561fbe7e8483b62f ] Commit b222dd2fdd53 ("block: call bio_uninit in bio_endio") added a call to bio_uninit in bio_endio to work around callers that use bio_init but fail to call bio_uninit after they are done to release the resources. While this is an abuse of the bio_init API we still have quite a few of those left. But this early uninit causes a problem for integrity data, as at least some users need the bio_integrity_payload. Right now the only one is the NVMe passthrough which archives this by adding a special case to skip the freeing if the BIP_INTEGRITY_USER flag is set. Sort this out by only putting bi_blkg in bio_endio as that is the cause of the actual leaks - the few users of the crypto context and integrity data all properly call bio_uninit, usually through bio_put for dynamically allocated bios. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240702151047.1746127-4-hch@lst.de Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- block/bio.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/block/bio.c b/block/bio.c index e9e809a63c59..c7a4bc05c43e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1630,8 +1630,18 @@ void bio_endio(struct bio *bio) goto again; } - /* release cgroup info */ - bio_uninit(bio); +#ifdef CONFIG_BLK_CGROUP + /* + * Release cgroup info. We shouldn't have to do this here, but quite + * a few callers of bio_init fail to call bio_uninit, so we cover up + * for that here at least for now. 
+ */ + if (bio->bi_blkg) { + blkg_put(bio->bi_blkg); + bio->bi_blkg = NULL; + } +#endif + if (bio->bi_end_io) bio->bi_end_io(bio); } From 1200485653a0e58e8284764f24b3959426b33843 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 3 Jul 2024 14:52:29 -0400 Subject: [PATCH 206/374] cgroup: Protect css->cgroup write under css_set_lock [ Upstream commit 57b56d16800e8961278ecff0dc755d46c4575092 ] The writing of css->cgroup associated with the cgroup root in rebind_subsystems() is currently protected only by cgroup_mutex. However, the reading of css->cgroup in both proc_cpuset_show() and proc_cgroup_show() is protected just by css_set_lock. That makes the readers susceptible to racing problems like data tearing or caching. It is also a problem that can be reported by KCSAN. This can be fixed by using READ_ONCE() and WRITE_ONCE() to access css->cgroup. Alternatively, the writing of css->cgroup can be moved under css_set_lock as well which is done by this patch. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin --- kernel/cgroup/cgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e32b6972c478..278889170f94 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1839,9 +1839,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) RCU_INIT_POINTER(scgrp->subsys[ssid], NULL); rcu_assign_pointer(dcgrp->subsys[ssid], css); ss->root = dst_root; - css->cgroup = dcgrp; spin_lock_irq(&css_set_lock); + css->cgroup = dcgrp; WARN_ON(!list_empty(&dcgrp->e_csets[ss->id])); list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id], e_cset_node[ss->id]) { From fc843d3837ebcb1c16d3768ef3eb55e25d5331f2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jul 2024 17:22:36 +0200 Subject: [PATCH 207/374] um: line: always fill *error_out in setup_one_line() [ Upstream commit 824ac4a5edd3f7494ab1996826c4f47f8ef0f63d ] The pointer isn't initialized by callers, but I have encountered cases where it's still printed; initialize it in all possible cases in setup_one_line(). Link: https://patch.msgid.link/20240703172235.ad863568b55f.Iaa1eba4db8265d7715ba71d5f6bb8c7ff63d27e9@changeid Acked-By: Anton Ivanov Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- arch/um/drivers/line.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index d82bc3fdb86e..43d8959cc746 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init, parse_chan_pair(NULL, line, n, opts, error_out); err = 0; } + *error_out = "configured as 'none'"; } else { char *new = kstrdup(init, GFP_KERNEL); if (!new) { @@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init, } } if (err) { + *error_out = "failed to parse channel pair"; line->init_str = NULL; line->valid = 0; kfree(new); From 2962197b3e92159d6d8a1d965eedc4016b8fe59e Mon Sep 17 00:00:00 2001 From: Zijun Hu Date: Tue, 2 Jul 2024 22:51:52 +0800 Subject: [PATCH 208/374] devres: Initialize an uninitialized struct member [ Upstream commit 56a20ad349b5c51909cf8810f7c79b288864ad33 ] Initialize an uninitialized struct member for driver API devres_open_group(). 
Signed-off-by: Zijun Hu Link: https://lore.kernel.org/r/1719931914-19035-4-git-send-email-quic_zijuhu@quicinc.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/base/devres.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 8d709dbd4e0c..e9b0d94aeabd 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp) grp->id = grp; if (id) grp->id = id; + grp->color = 0; spin_lock_irqsave(&dev->devres_lock, flags); add_dr(dev, &grp->node[0]); From bfc44075b19740d372f989f21dd03168bfda0689 Mon Sep 17 00:00:00 2001 From: Krishna Kumar Date: Mon, 1 Jul 2024 13:15:06 +0530 Subject: [PATCH 209/374] pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv [ Upstream commit 335e35b748527f0c06ded9eebb65387f60647fda ] The hotplug driver for powerpc (pci/hotplug/pnv_php.c) causes a kernel crash when we try to hot-unplug/disable the PCIe switch/bridge from the PHB. The crash occurs because although the MSI data structure has been released during disable/hot-unplug path and it has been assigned with NULL, still during unregistration the code was again trying to explicitly disable the MSI which causes the NULL pointer dereference and kernel crash. The patch fixes the check during unregistration path to prevent invoking pci_disable_msi/msix() since its data structure is already freed. Reported-by: Timothy Pearson Closes: https://lore.kernel.org/all/1981605666.2142272.1703742465927.JavaMail.zimbra@raptorengineeringinc.com/ Acked-by: Bjorn Helgaas Tested-by: Shawn Anastasio Signed-off-by: Krishna Kumar Signed-off-by: Michael Ellerman Link: https://msgid.link/20240701074513.94873-2-krishnak@linux.ibm.com Signed-off-by: Sasha Levin --- drivers/pci/hotplug/pnv_php.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 694349be9d0a..573a41869c15 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -40,7 +40,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, bool disable_device) { struct pci_dev *pdev = php_slot->pdev; - int irq = php_slot->irq; u16 ctrl; if (php_slot->irq > 0) { @@ -59,7 +58,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, php_slot->wq = NULL; } - if (disable_device || irq > 0) { + if (disable_device) { if (pdev->msix_enabled) pci_disable_msix(pdev); else if (pdev->msi_enabled) From 2745d99da13110648ea99c1dff8b333261e97c4d Mon Sep 17 00:00:00 2001 From: Xuan Zhuo Date: Thu, 6 Jun 2024 19:13:45 +0800 Subject: [PATCH 210/374] virtio_ring: fix KMSAN error for premapped mode [ Upstream commit 840b2d39a2dc1b96deb3f5c7fef76c9b24f08f51 ] Add kmsan for virtqueue_dma_map_single_attrs to fix: BUG: KMSAN: uninit-value in receive_buf+0x45ca/0x6990 receive_buf+0x45ca/0x6990 virtnet_poll+0x17e0/0x3130 net_rx_action+0x832/0x26e0 handle_softirqs+0x330/0x10f0 [...] Uninit was created at: __alloc_pages_noprof+0x62a/0xe60 alloc_pages_noprof+0x392/0x830 skb_page_frag_refill+0x21a/0x5c0 virtnet_rq_alloc+0x50/0x1500 try_fill_recv+0x372/0x54c0 virtnet_open+0x210/0xbe0 __dev_open+0x56e/0x920 __dev_change_flags+0x39c/0x2000 dev_change_flags+0xaa/0x200 do_setlink+0x197a/0x7420 rtnl_setlink+0x77c/0x860 [...] Signed-off-by: Xuan Zhuo Tested-by: Alexander Potapenko Message-Id: <20240606111345.93600-1-xuanzhuo@linux.alibaba.com> Signed-off-by: Michael S. 
Tsirkin Tested-by: Ilya Leoshkevich # s390x Acked-by: Jason Wang Signed-off-by: Sasha Levin --- drivers/virtio/virtio_ring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 2a972752ff1b..9d3a9942c8c8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -3121,8 +3121,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, { struct vring_virtqueue *vq = to_vvq(_vq); - if (!vq->use_dma_api) + if (!vq->use_dma_api) { + kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir); return (dma_addr_t)virt_to_phys(ptr); + } return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs); } From 25eaef533bf3ccc6fee5067aac16f41f280e343e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcin=20=C5=9Alusarz?= Date: Tue, 28 May 2024 13:02:46 +0200 Subject: [PATCH 211/374] wifi: rtw88: usb: schedule rx work after everything is set up MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit adc539784c98a7cc602cbf557debfc2e7b9be8b3 ] Right now it's possible to hit NULL pointer dereference in rtw_rx_fill_rx_status on hw object and/or its fields because initialization routine can start getting USB replies before rtw_dev is fully setup. The stack trace looks like this: rtw_rx_fill_rx_status rtw8821c_query_rx_desc rtw_usb_rx_handler ... queue_work rtw_usb_read_port_complete ... usb_submit_urb rtw_usb_rx_resubmit rtw_usb_init_rx rtw_usb_probe So while we do the async stuff rtw_usb_probe continues and calls rtw_register_hw, which does all kinds of initialization (e.g. via ieee80211_register_hw) that rtw_rx_fill_rx_status relies on. Fix this by moving the first usb_submit_urb after everything is set up. For me, this bug manifested as: [ 8.893177] rtw_8821cu 1-1:1.2: band wrong, packet dropped [ 8.910904] rtw_8821cu 1-1:1.2: hw->conf.chandef.chan NULL in rtw_rx_fill_rx_status because I'm using Larry's backport of rtw88 driver with the NULL checks in rtw_rx_fill_rx_status. 
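The shape of the fix is a probe-ordering change: allocate the RX machinery early, but only start the asynchronous receive path after registration has completed, so completion callbacks can never observe a half-initialized device. A compile-only sketch with hypothetical helper names (none of these are the rtw88 driver's real functions):

  struct my_dev;

  int my_init_rx(struct my_dev *dev);     /* buffers and workqueue; no URBs yet */
  int my_register_hw(struct my_dev *dev); /* e.g. wraps ieee80211_register_hw() */
  void my_start_rx(struct my_dev *dev);   /* submit the RX URBs */
  void my_free_rx(struct my_dev *dev);

  static int my_probe(struct my_dev *dev)
  {
          int ret;

          ret = my_init_rx(dev);
          if (ret)
                  return ret;

          ret = my_register_hw(dev);
          if (ret) {
                  my_free_rx(dev);
                  return ret;
          }

          my_start_rx(dev);       /* callbacks now see a fully set-up device */
          return 0;
  }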
Link: https://lore.kernel.org/linux-wireless/CA+shoWQ7P49jhQasofDcTdQhiuarPTjYEDa--NiVVx494WcuQw@mail.gmail.com/ Signed-off-by: Marcin Ślusarz Cc: Tim K Cc: Ping-Ke Shih Cc: Larry Finger Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ping-Ke Shih Link: https://patch.msgid.link/20240528110246.477321-1-marcin.slusarz@gmail.com Signed-off-by: Sasha Levin --- drivers/net/wireless/realtek/rtw88/usb.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index 0001a1ab6f38..edc1507514f6 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -744,7 +744,6 @@ static struct rtw_hci_ops rtw_usb_ops = { static int rtw_usb_init_rx(struct rtw_dev *rtwdev) { struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); - int i; rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq"); if (!rtwusb->rxwq) { @@ -756,13 +755,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev) INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler); + return 0; +} + +static void rtw_usb_setup_rx(struct rtw_dev *rtwdev) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + int i; + for (i = 0; i < RTW_USB_RXCB_NUM; i++) { struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i]; rtw_usb_rx_resubmit(rtwusb, rxcb); } - - return 0; } static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev) @@ -899,6 +904,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) goto err_destroy_rxwq; } + rtw_usb_setup_rx(rtwdev); + return 0; err_destroy_rxwq: From 3844586e9bd9845140e1078f1e61896b576ac536 Mon Sep 17 00:00:00 2001 From: Kyoungrul Kim Date: Thu, 27 Jun 2024 17:51:04 +0900 Subject: [PATCH 212/374] scsi: ufs: core: Remove SCSI host only if added [ Upstream commit 7cbff570dbe8907e23bba06f6414899a0fbb2fcc ] If host tries to remove ufshcd driver from a UFS device it would cause a kernel panic if ufshcd_async_scan fails during ufshcd_probe_hba before adding a SCSI host with scsi_add_host and MCQ is enabled since SCSI host has been defered after MCQ configuration introduced by commit 0cab4023ec7b ("scsi: ufs: core: Defer adding host to SCSI if MCQ is supported"). To guarantee that SCSI host is removed only if it has been added, set the scsi_host_added flag to true after adding a SCSI host and check whether it is set or not before removing it. Signed-off-by: Kyoungrul Kim Signed-off-by: Minwoo Im Link: https://lore.kernel.org/r/20240627085104epcms2p5897a3870ea5c6416aa44f94df6c543d7@epcms2p5 Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/ufs/core/ufshcd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 91bfdc17eedb..b9c436a002a1 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -10196,7 +10196,8 @@ void ufshcd_remove(struct ufs_hba *hba) blk_mq_destroy_queue(hba->tmf_queue); blk_put_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); - scsi_remove_host(hba->host); + if (hba->scsi_host_added) + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); @@ -10478,6 +10479,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "scsi_add_host failed\n"); goto out_disable; } + hba->scsi_host_added = true; } hba->tmf_tag_set = (struct blk_mq_tag_set) { @@ -10560,7 +10562,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) free_tmf_tag_set: blk_mq_free_tag_set(&hba->tmf_tag_set); out_remove_scsi_host: - scsi_remove_host(hba->host); + if (hba->scsi_host_added) + scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); From f14d3e1aa613311c744af32d75125e95fc8ffb84 Mon Sep 17 00:00:00 2001 From: Igor Pylypiv Date: Thu, 27 Jun 2024 15:59:23 +0000 Subject: [PATCH 213/374] scsi: pm80xx: Set phy->enable_completion only when we wait for it [ Upstream commit e4f949ef1516c0d74745ee54a0f4882c1f6c7aea ] pm8001_phy_control() populates the enable_completion pointer with a stack address, sends a PHY_LINK_RESET / PHY_HARD_RESET, waits 300 ms, and returns. The problem arises when a phy control response comes late. After 300 ms the pm8001_phy_control() function returns and the passed enable_completion stack address is no longer valid. Late phy control response invokes complete() on a dangling enable_completion pointer which leads to a kernel crash. Signed-off-by: Igor Pylypiv Signed-off-by: Terrence Adams Link: https://lore.kernel.org/r/20240627155924.2361370-2-tadamsjr@google.com Acked-by: Jack Wang Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/pm8001/pm8001_sas.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a5a31dfa4512..ee2da8e49d4c 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, unsigned long flags; pm8001_ha = sas_phy->ha->lldd_ha; phy = &pm8001_ha->phy[phy_id]; - pm8001_ha->phy[phy_id].enable_completion = &completion; if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { /* @@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, rates->maximum_linkrate; } if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } @@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_HARD_RESET: if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } @@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_LINK_RESET: if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } From bba47fe3b038cca3d3ebd799665ce69d6d273b58 Mon Sep 17 00:00:00 2001 From: Justin Tee Date: Fri, 28 Jun 2024 10:20:08 -0700 Subject: [PATCH 214/374] scsi: lpfc: Handle mailbox timeouts in lpfc_get_sfp_info [ Upstream commit ede596b1434b57c0b3fd5c02b326efe5c54f6e48 ] The MBX_TIMEOUT return code is not handled in lpfc_get_sfp_info and the routine unconditionally frees submitted mailbox commands regardless of return status. The issue is that for MBX_TIMEOUT cases, when firmware returns SFP information at a later time, that same mailbox memory region references previously freed memory in its cmpl routine. Fix by adding checks for the MBX_TIMEOUT return code. During mailbox resource cleanup, check the mbox flag to make sure that the wait did not timeout. If the MBOX_WAKE flag is not set, then do not free the resources because it will be freed when firmware completes the mailbox at a later time in its cmpl routine. Also, increase the timeout from 30 to 60 seconds to accommodate boot scripts requiring longer timeouts. Signed-off-by: Justin Tee Link: https://lore.kernel.org/r/20240628172011.25921-6-justintee8345@gmail.com Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_els.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c32bc773ab29..445cb6c2e80f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7302,13 +7302,13 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } mbox->vport = phba->pport; - - rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); if (rc == MBX_NOT_FINISHED) { rc = 1; goto error; } - + if (rc == MBX_TIMEOUT) + goto error; if (phba->sli_rev == LPFC_SLI_REV4) mp = mbox->ctx_buf; else @@ -7361,7 +7361,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } - rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); + + if (rc == MBX_TIMEOUT) + goto error; if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { rc = 1; goto error; @@ -7372,8 +7375,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, DMP_SFF_PAGE_A2_SIZE); error: - mbox->ctx_buf = mpsave; - lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + if (mbox->mbox_flag & LPFC_MBX_WAKE) { + mbox->ctx_buf = mpsave; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + } return rc; From 8f2dabd2eef15b52058fcca2ad5258e1125f905b Mon Sep 17 00:00:00 2001 From: Hareshx Sankar Raj Date: Tue, 25 Jun 2024 15:41:19 +0100 Subject: [PATCH 215/374] crypto: qat - fix unintentional re-enabling of error interrupts [ Upstream commit f0622894c59458fceb33c4197462bc2006f3fc6b ] The logic that detects pending VF2PF interrupts unintentionally clears the section of the error mask register(s) not related to VF2PF. This might cause interrupts unrelated to VF2PF, reported through errsou3 and errsou5, to be reported again after the execution of the function disable_pending_vf2pf_interrupts() in dh895xcc and GEN2 devices. Fix by updating only section of errmsk3 and errmsk5 related to VF2PF. 
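The fix is the standard read-modify-write discipline for a shared register: clear only the field you own, OR in the new value for that field, and leave every other bit as it was. A minimal sketch (the bit range below is illustrative, not the real VF2PF layout):

  #include <linux/bits.h>
  #include <linux/types.h>

  #define VF2PF_FIELD     GENMASK(25, 9)          /* example field only */

  static u32 update_vf2pf_field(u32 errmsk, u32 vf2pf_bits)
  {
          errmsk &= ~VF2PF_FIELD;                 /* clear only our field */
          errmsk |= vf2pf_bits & VF2PF_FIELD;     /* write the new value into it */
          return errmsk;                          /* all other bits preserved */
  }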
Signed-off-by: Hareshx Sankar Raj Reviewed-by: Damian Muszynski Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c | 4 +++- .../crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c index 70ef11963938..43af81fcab86 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c @@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); - errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); + /* Update only section of errmsk3 related to VF2PF */ + errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); + errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); /* Return the sources of the (new) interrupt(s) */ diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 6e24d57e6b98..c0661ff5e929 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -193,8 +193,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); - errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); - errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); + /* Update only section of errmsk3 and errmsk5 related to VF2PF */ + errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK); + errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK); + + errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); + errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); From e74ba5e3056ff426459bc4caa4941c5249b3d12b Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 5 Jul 2024 16:11:25 +0900 Subject: [PATCH 216/374] tracing/kprobes: Add symbol counting check when module loads [ Upstream commit 9d8616034f161222a4ac166c1b42b6d79961c005 ] Currently, kprobe event checks whether the target symbol name is unique or not, so that it does not put a probe on an unexpected place. But this skips the check if the target is on a module because the module may not be loaded. To fix this issue, this patch checks the number of probe target symbols in a target module when the module is loaded. If the probe is not on the unique name symbols in the module, it will be rejected at that point. 
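A compact user-space model of that policy, splitting an optional "module:symbol" spec the same way validate_probe_symbol() does (the symbol counter is a stub, since kallsyms lookups are kernel-internal):

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  /* Stub standing in for the kallsyms/module_kallsyms counting. */
  static unsigned int count_matches(const char *modname, const char *sym)
  {
          (void)modname;
          (void)sym;
          return 1;                       /* pretend the symbol is unique */
  }

  /* More than one match: demand an explicit address (-EADDRNOTAVAIL).
   * No match at all: fail early with -ENOENT.
   * Exactly one match: the probe site is unambiguous. */
  static int validate_spec(char *spec)
  {
          char *modname = NULL, *sym = spec, *p = strchr(spec, ':');
          unsigned int count;

          if (p) {
                  modname = spec;
                  sym = p + 1;
                  *p = '\0';
          }
          count = count_matches(modname, sym);
          if (p)
                  *p = ':';               /* restore the caller's buffer */
          if (count > 1)
                  return -EADDRNOTAVAIL;
          if (count == 0)
                  return -ENOENT;
          return 0;
  }

  int main(void)
  {
          char spec[] = "mymod:my_func";  /* hypothetical module:symbol */

          printf("%d\n", validate_spec(spec));
          return 0;
  }
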
Note that the symbol which has a unique name in the target module, it will be accepted even if there are same-name symbols in the kernel or other modules, Link: https://lore.kernel.org/all/172016348553.99543.2834679315611882137.stgit@devnote2/ Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace_kprobe.c | 125 +++++++++++++++++++++++------------- 1 file changed, 81 insertions(+), 44 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 16383247bdbf..0d88922f8763 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -678,6 +678,21 @@ static int register_trace_kprobe(struct trace_kprobe *tk) } #ifdef CONFIG_MODULES +static int validate_module_probe_symbol(const char *modname, const char *symbol); + +static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk) +{ + const char *p; + int ret = 0; + + p = strchr(trace_kprobe_symbol(tk), ':'); + if (p) + ret = validate_module_probe_symbol(module_name(mod), p + 1); + if (!ret) + ret = __register_trace_kprobe(tk); + return ret; +} + /* Module notifier call back, checking event on the module */ static int trace_kprobe_module_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -696,7 +711,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, if (trace_kprobe_within_module(tk, mod)) { /* Don't need to check busy - this should have gone. */ __unregister_trace_kprobe(tk); - ret = __register_trace_kprobe(tk); + ret = register_module_trace_kprobe(mod, tk); if (ret) pr_warn("Failed to re-register probe %s on %s: %d\n", trace_probe_name(&tk->tp), @@ -747,17 +762,68 @@ static int count_mod_symbols(void *data, const char *name, unsigned long unused) return 0; } -static unsigned int number_of_same_symbols(char *func_name) +static unsigned int number_of_same_symbols(const char *mod, const char *func_name) { struct sym_count_ctx ctx = { .count = 0, .name = func_name }; - kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); + if (!mod) + kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); - module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx); + module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx); return ctx.count; } +static int validate_module_probe_symbol(const char *modname, const char *symbol) +{ + unsigned int count = number_of_same_symbols(modname, symbol); + + if (count > 1) { + /* + * Users should use ADDR to remove the ambiguity of + * using KSYM only. + */ + return -EADDRNOTAVAIL; + } else if (count == 0) { + /* + * We can return ENOENT earlier than when register the + * kprobe. + */ + return -ENOENT; + } + return 0; +} + +static int validate_probe_symbol(char *symbol) +{ + struct module *mod = NULL; + char *modname = NULL, *p; + int ret = 0; + + p = strchr(symbol, ':'); + if (p) { + modname = symbol; + symbol = p + 1; + *p = '\0'; + /* Return 0 (defer) if the module does not exist yet. 
*/ + rcu_read_lock_sched(); + mod = find_module(modname); + if (mod && !try_module_get(mod)) + mod = NULL; + rcu_read_unlock_sched(); + if (!mod) + goto out; + } + + ret = validate_module_probe_symbol(modname, symbol); +out: + if (p) + *p = ':'; + if (mod) + module_put(mod); + return ret; +} + static int trace_kprobe_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs); @@ -881,6 +947,14 @@ static int __trace_kprobe_create(int argc, const char *argv[]) trace_probe_log_err(0, BAD_PROBE_ADDR); goto parse_error; } + ret = validate_probe_symbol(symbol); + if (ret) { + if (ret == -EADDRNOTAVAIL) + trace_probe_log_err(0, NON_UNIQ_SYMBOL); + else + trace_probe_log_err(0, BAD_PROBE_ADDR); + goto parse_error; + } if (is_return) ctx.flags |= TPARG_FL_RETURN; ret = kprobe_on_func_entry(NULL, symbol, offset); @@ -893,31 +967,6 @@ static int __trace_kprobe_create(int argc, const char *argv[]) } } - if (symbol && !strchr(symbol, ':')) { - unsigned int count; - - count = number_of_same_symbols(symbol); - if (count > 1) { - /* - * Users should use ADDR to remove the ambiguity of - * using KSYM only. - */ - trace_probe_log_err(0, NON_UNIQ_SYMBOL); - ret = -EADDRNOTAVAIL; - - goto error; - } else if (count == 0) { - /* - * We can return ENOENT earlier than when register the - * kprobe. - */ - trace_probe_log_err(0, BAD_PROBE_ADDR); - ret = -ENOENT; - - goto error; - } - } - trace_probe_log_set_index(0); if (event) { ret = traceprobe_parse_event_name(&event, &group, gbuf, @@ -1835,21 +1884,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, char *event; if (func) { - unsigned int count; - - count = number_of_same_symbols(func); - if (count > 1) - /* - * Users should use addr to remove the ambiguity of - * using func only. - */ - return ERR_PTR(-EADDRNOTAVAIL); - else if (count == 0) - /* - * We can return ENOENT earlier than when register the - * kprobe. - */ - return ERR_PTR(-ENOENT); + ret = validate_probe_symbol(func); + if (ret) + return ERR_PTR(ret); } /* From b0bdb43852bf7f55ba02f0cbf00b4ea7ca897bff Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 6 Jul 2024 23:43:04 -0700 Subject: [PATCH 217/374] hwmon: (adc128d818) Fix underflows seen when writing limit attributes [ Upstream commit 8cad724c8537fe3e0da8004646abc00290adae40 ] DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large negative number such as -9223372036854775808 is provided by the user. Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations. Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/adc128d818.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 8ac6e735ec5c..5e805d4ee76a 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -175,7 +175,7 @@ static ssize_t adc128_in_store(struct device *dev, mutex_lock(&data->update_lock); /* 10 mV LSB on limit registers */ - regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255); + regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10); data->in[index][nr] = regval << 4; reg = index == 1 ? 
ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr); i2c_smbus_write_byte_data(data->client, reg, regval); @@ -213,7 +213,7 @@ static ssize_t adc128_temp_store(struct device *dev, return err; mutex_lock(&data->update_lock); - regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); data->temp[index] = regval << 1; i2c_smbus_write_byte_data(data->client, index == 1 ? ADC128_REG_TEMP_MAX From 16f42953231be1e7be77bc24005270d9e0d9d2ee Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 6 Jul 2024 23:48:42 -0700 Subject: [PATCH 218/374] hwmon: (lm95234) Fix underflows seen when writing limit attributes [ Upstream commit af64e3e1537896337405f880c1e9ac1f8c0c6198 ] DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large negative number such as -9223372036854775808 is provided by the user. Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations. Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/lm95234.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c index 67b9d7636ee4..37e8e9679aeb 100644 --- a/drivers/hwmon/lm95234.c +++ b/drivers/hwmon/lm95234.c @@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000), + 1000); mutex_lock(&data->update_lock); data->tcrit2[index] = val; @@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); mutex_lock(&data->update_lock); data->tcrit1[index] = val; @@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev, if (ret < 0) return ret; - val = DIV_ROUND_CLOSEST(val, 1000); + val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); val = clamp_val((int)data->tcrit1[index] - val, 0, 31); mutex_lock(&data->update_lock); @@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr, return ret; /* Accuracy is 1/2 degrees C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); mutex_lock(&data->update_lock); data->toffset[index] = val; From 996221b030995cc5f5baa4a642201d64b62a17cd Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 6 Jul 2024 23:50:08 -0700 Subject: [PATCH 219/374] hwmon: (nct6775-core) Fix underflows seen when writing limit attributes [ Upstream commit 0403e10bf0824bf0ec2bb135d4cf1c0cc3bf4bf0 ] DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large negative number such as -9223372036854775808 is provided by the user. Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations. 
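The failure mode is easy to reproduce outside the kernel with simplified stand-ins for the two macros (models only, not the kernel definitions):

  #include <limits.h>
  #include <stdio.h>

  /* Simplified signed round-to-closest division and clamp. */
  #define DIV_ROUND_CLOSEST(x, d) \
          ((((x) > 0) == ((d) > 0)) ? ((x) + (d) / 2) / (d) \
                                    : ((x) - (d) / 2) / (d))
  #define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

  int main(void)
  {
          long val = LONG_MIN;    /* kstrtol() result for -9223372036854775808 */

          /* Old order (commented out): the "- 500" inside the division
           * underflows LONG_MIN before the clamp ever runs.
           * long bad = clamp_val(DIV_ROUND_CLOSEST(val, 1000L), -128L, 127L);
           */

          /* New order: clamp first, then divide a safely bounded value. */
          long good = DIV_ROUND_CLOSEST(clamp_val(val, -128000L, 127000L), 1000L);

          printf("%ld\n", good);  /* -128 */
          return 0;
  }
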
Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/nct6775-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index 9fbab8f02334..934fed3dd586 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); mutex_lock(&data->update_lock); data->temp_offset[nr] = val; From 26825b62bd1bd3e53b4f44e0745cb516d5186343 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 6 Jul 2024 23:51:34 -0700 Subject: [PATCH 220/374] hwmon: (w83627ehf) Fix underflows seen when writing limit attributes [ Upstream commit 5c1de37969b7bc0abcb20b86e91e70caebbd4f89 ] DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large negative number such as -9223372036854775808 is provided by the user. Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations. Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/w83627ehf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index fe960c0a624f..7d7d70afde65 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000); mutex_lock(&data->update_lock); data->target_temp[nr] = val; @@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr, return err; /* Limit the temp to 0C - 15C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000); mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); From 899eddc06e026ec6e4ebe13fe76f503caad89f80 Mon Sep 17 00:00:00 2001 From: Shenghao Ding Date: Sun, 7 Jul 2024 16:30:07 +0800 Subject: [PATCH 221/374] ASoc: TAS2781: replace beXX_to_cpup with get_unaligned_beXX for potentially broken alignment [ Upstream commit 1cc509edbe23b61e8c245611bd15d88edb635a38 ] Use get_unaligned_be16 instead of be16_to_cpup and get_unaligned_be32 instead of be32_to_cpup for potentially broken alignment. 
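The firmware image is parsed as a raw byte stream, so a multi-byte field can start at any offset. A byte-wise load is safe regardless of alignment; this is, in effect, what the get_unaligned_be16/be32 helpers provide (stand-alone model):

  #include <stdint.h>
  #include <stdio.h>

  /* Byte-wise big-endian loads: no alignment requirement, unlike casting
   * &buf[offset] to a __be32 pointer and dereferencing it. */
  static uint16_t rd_be16(const uint8_t *p)
  {
          return (uint16_t)((p[0] << 8) | p[1]);
  }

  static uint32_t rd_be32(const uint8_t *p)
  {
          return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                 ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
  }

  int main(void)
  {
          /* 1-byte tag followed by a 32-bit and a 16-bit big-endian field,
           * so neither field is naturally aligned. */
          const uint8_t img[] = { 0x01, 0x00, 0x01, 0x02, 0x03, 0x12, 0x34 };

          printf("%u 0x%x\n", rd_be32(&img[1]), rd_be16(&img[5]));
          return 0;
  }
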
Signed-off-by: Shenghao Ding Link: https://patch.msgid.link/20240707083011.98-1-shenghao-ding@ti.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/tas2781-fmwlib.c | 71 +++++++++++++++---------------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index 08082806d589..8f9a3ae7153e 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -21,7 +21,7 @@ #include #include #include - +#include #define ERROR_PRAM_CRCCHK 0x0000000 #define ERROR_YRAM_CRCCHK 0x0000001 @@ -187,8 +187,7 @@ static struct tasdevice_config_info *tasdevice_add_config( /* convert data[offset], data[offset + 1], data[offset + 2] and * data[offset + 3] into host */ - cfg_info->nblocks = - be32_to_cpup((__be32 *)&config_data[config_offset]); + cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]); config_offset += 4; /* Several kinds of dsp/algorithm firmwares can run on tas2781, @@ -232,14 +231,14 @@ static struct tasdevice_config_info *tasdevice_add_config( } bk_da[i]->yram_checksum = - be16_to_cpup((__be16 *)&config_data[config_offset]); + get_unaligned_be16(&config_data[config_offset]); config_offset += 2; bk_da[i]->block_size = - be32_to_cpup((__be32 *)&config_data[config_offset]); + get_unaligned_be32(&config_data[config_offset]); config_offset += 4; bk_da[i]->n_subblks = - be32_to_cpup((__be32 *)&config_data[config_offset]); + get_unaligned_be32(&config_data[config_offset]); config_offset += 4; @@ -289,7 +288,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw) } buf = (unsigned char *)fmw->data; - fw_hdr->img_sz = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->img_sz = get_unaligned_be32(&buf[offset]); offset += 4; if (fw_hdr->img_sz != fmw->size) { dev_err(tas_priv->dev, @@ -300,9 +299,9 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw) goto out; } - fw_hdr->checksum = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->checksum = get_unaligned_be32(&buf[offset]); offset += 4; - fw_hdr->binary_version_num = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]); if (fw_hdr->binary_version_num < 0x103) { dev_err(tas_priv->dev, "File version 0x%04x is too low", fw_hdr->binary_version_num); @@ -311,7 +310,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw) goto out; } offset += 4; - fw_hdr->drv_fw_version = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]); offset += 8; fw_hdr->plat_type = buf[offset]; offset += 1; @@ -339,11 +338,11 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw) for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++) fw_hdr->devs[i] = buf[offset]; - fw_hdr->nconfig = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->nconfig = get_unaligned_be32(&buf[offset]); offset += 4; for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) { - fw_hdr->config_size[i] = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]); offset += 4; total_config_sz += fw_hdr->config_size[i]; } @@ -423,7 +422,7 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw, /* convert data[offset], data[offset + 1], data[offset + 2] and * data[offset + 3] into host */ - block->type = be32_to_cpup((__be32 *)&data[offset]); + block->type = get_unaligned_be32(&data[offset]); offset += 4; block->is_pchksum_present = data[offset]; @@ -438,10 +437,10 @@ static int 
fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw, block->ychksum = data[offset]; offset++; - block->blk_size = be32_to_cpup((__be32 *)&data[offset]); + block->blk_size = get_unaligned_be32(&data[offset]); offset += 4; - block->nr_subblocks = be32_to_cpup((__be32 *)&data[offset]); + block->nr_subblocks = get_unaligned_be32(&data[offset]); offset += 4; /* fixed m68k compiling issue: @@ -482,7 +481,7 @@ static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw, offset = -EINVAL; goto out; } - img_data->nr_blk = be32_to_cpup((__be32 *)&data[offset]); + img_data->nr_blk = get_unaligned_be32(&data[offset]); offset += 4; img_data->dev_blks = kcalloc(img_data->nr_blk, @@ -578,14 +577,14 @@ static int fw_parse_variable_header_kernel( offset = -EINVAL; goto out; } - fw_hdr->device_family = be16_to_cpup((__be16 *)&buf[offset]); + fw_hdr->device_family = get_unaligned_be16(&buf[offset]); if (fw_hdr->device_family != 0) { dev_err(tas_priv->dev, "%s:not TAS device\n", __func__); offset = -EINVAL; goto out; } offset += 2; - fw_hdr->device = be16_to_cpup((__be16 *)&buf[offset]); + fw_hdr->device = get_unaligned_be16(&buf[offset]); if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE || fw_hdr->device == 6) { dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device); @@ -603,7 +602,7 @@ static int fw_parse_variable_header_kernel( goto out; } - tas_fmw->nr_programs = be32_to_cpup((__be32 *)&buf[offset]); + tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]); offset += 4; if (tas_fmw->nr_programs == 0 || tas_fmw->nr_programs > @@ -622,14 +621,14 @@ static int fw_parse_variable_header_kernel( for (i = 0; i < tas_fmw->nr_programs; i++) { program = &(tas_fmw->programs[i]); - program->prog_size = be32_to_cpup((__be32 *)&buf[offset]); + program->prog_size = get_unaligned_be32(&buf[offset]); offset += 4; } /* Skip the unused prog_size */ offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs); - tas_fmw->nr_configurations = be32_to_cpup((__be32 *)&buf[offset]); + tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]); offset += 4; /* The max number of config in firmware greater than 4 pieces of @@ -661,7 +660,7 @@ static int fw_parse_variable_header_kernel( for (i = 0; i < tas_fmw->nr_programs; i++) { config = &(tas_fmw->configs[i]); - config->cfg_size = be32_to_cpup((__be32 *)&buf[offset]); + config->cfg_size = get_unaligned_be32(&buf[offset]); offset += 4; } @@ -699,7 +698,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, switch (subblk_typ) { case TASDEVICE_CMD_SING_W: { int i; - unsigned short len = be16_to_cpup((__be16 *)&data[2]); + unsigned short len = get_unaligned_be16(&data[2]); subblk_offset += 2; if (subblk_offset + 4 * len > sublocksize) { @@ -725,7 +724,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, } break; case TASDEVICE_CMD_BURST: { - unsigned short len = be16_to_cpup((__be16 *)&data[2]); + unsigned short len = get_unaligned_be16(&data[2]); subblk_offset += 2; if (subblk_offset + 4 + len > sublocksize) { @@ -766,7 +765,7 @@ static int tasdevice_process_block(void *context, unsigned char *data, is_err = true; break; } - sleep_time = be16_to_cpup((__be16 *)&data[2]) * 1000; + sleep_time = get_unaligned_be16(&data[2]) * 1000; usleep_range(sleep_time, sleep_time + 50); subblk_offset += 2; } @@ -910,7 +909,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv offset += len; - fw_hdr->device_family = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->device_family = get_unaligned_be32(&buf[offset]); if 
(fw_hdr->device_family != 0) { dev_err(tas_priv->dev, "%s: not TAS device\n", __func__); offset = -EINVAL; @@ -918,7 +917,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv } offset += 4; - fw_hdr->device = be32_to_cpup((__be32 *)&buf[offset]); + fw_hdr->device = get_unaligned_be32(&buf[offset]); if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE || fw_hdr->device == 6) { dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device); @@ -963,7 +962,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw, offset = -EINVAL; goto out; } - block->type = be32_to_cpup((__be32 *)&data[offset]); + block->type = get_unaligned_be32(&data[offset]); offset += 4; if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) { @@ -988,7 +987,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw, block->is_ychksum_present = 0; } - block->nr_cmds = be32_to_cpup((__be32 *)&data[offset]); + block->nr_cmds = get_unaligned_be32(&data[offset]); offset += 4; n = block->nr_cmds * 4; @@ -1039,7 +1038,7 @@ static int fw_parse_data(struct tasdevice_fw *tas_fmw, goto out; } offset += n; - img_data->nr_blk = be16_to_cpup((__be16 *)&data[offset]); + img_data->nr_blk = get_unaligned_be16(&data[offset]); offset += 2; img_data->dev_blks = kcalloc(img_data->nr_blk, @@ -1076,7 +1075,7 @@ static int fw_parse_program_data(struct tasdevice_priv *tas_priv, offset = -EINVAL; goto out; } - tas_fmw->nr_programs = be16_to_cpup((__be16 *)&buf[offset]); + tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]); offset += 2; if (tas_fmw->nr_programs == 0) { @@ -1143,7 +1142,7 @@ static int fw_parse_configuration_data( offset = -EINVAL; goto out; } - tas_fmw->nr_configurations = be16_to_cpup((__be16 *)&data[offset]); + tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]); offset += 2; if (tas_fmw->nr_configurations == 0) { @@ -1775,7 +1774,7 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv, /* Convert data[offset], data[offset + 1], data[offset + 2] and * data[offset + 3] into host */ - fw_fixed_hdr->fwsize = be32_to_cpup((__be32 *)&buf[offset]); + fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]); offset += 4; if (fw_fixed_hdr->fwsize != fmw->size) { dev_err(tas_priv->dev, "File size not match, %lu %u", @@ -1784,9 +1783,9 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv, goto out; } offset += 4; - fw_fixed_hdr->ppcver = be32_to_cpup((__be32 *)&buf[offset]); + fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]); offset += 8; - fw_fixed_hdr->drv_ver = be32_to_cpup((__be32 *)&buf[offset]); + fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]); offset += 72; out: @@ -1828,7 +1827,7 @@ static int fw_parse_calibration_data(struct tasdevice_priv *tas_priv, offset = -EINVAL; goto out; } - tas_fmw->nr_calibrations = be16_to_cpup((__be16 *)&data[offset]); + tas_fmw->nr_calibrations = get_unaligned_be16(&data[offset]); offset += 2; if (tas_fmw->nr_calibrations != 1) { From 9feed1c5260cf1d161dea7a56a28d2dfa0cfa48a Mon Sep 17 00:00:00 2001 From: Andreas Ziegler Date: Wed, 3 Jul 2024 10:34:36 +0200 Subject: [PATCH 222/374] libbpf: Add NULL checks to bpf_object__{prev_map,next_map} [ Upstream commit cedc12c5b57f7efa6dbebfb2b140e8675f5a2616 ] In the current state, an erroneous call to bpf_object__find_map_by_name(NULL, ...) leads to a segmentation fault through the following call chain: bpf_object__find_map_by_name(obj = NULL, ...) 
-> bpf_object__for_each_map(pos, obj = NULL) -> bpf_object__next_map((obj = NULL), NULL) -> return (obj = NULL)->maps While calling bpf_object__find_map_by_name with obj = NULL is obviously incorrect, this should not lead to a segmentation fault but rather be handled gracefully. As __bpf_map__iter already handles this situation correctly, we can delegate the check for the regular case there and only add a check in case the prev or next parameter is NULL. Signed-off-by: Andreas Ziegler Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com Signed-off-by: Sasha Levin --- tools/lib/bpf/libbpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5401f2df463d..5edb71764784 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10336,7 +10336,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) struct bpf_map * bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) { - if (prev == NULL) + if (prev == NULL && obj != NULL) return obj->maps; return __bpf_map__iter(prev, obj, 1); @@ -10345,7 +10345,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) struct bpf_map * bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) { - if (next == NULL) { + if (next == NULL && obj != NULL) { if (!obj->nr_maps) return NULL; return obj->maps + obj->nr_maps - 1; From bce2c507d99e5b764101573186033ef35ae2f0f0 Mon Sep 17 00:00:00 2001 From: Yifan Zha Date: Thu, 27 Jun 2024 15:06:23 +0800 Subject: [PATCH 223/374] drm/amdgpu: Set no_hw_access when VF request full GPU fails [ Upstream commit 33f23fc3155b13c4a96d94a0a22dc26db767440b ] [Why] If VF request full GPU access and the request failed, the VF driver can get stuck accessing registers for an extended period during the unload of KMS. [How] Set no_hw_access flag when VF request for full GPU access fails This prevents further hardware access attempts, avoiding the prolonged stuck state. Signed-off-by: Yifan Zha Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 2359d1d60275..26cea0076c9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -86,8 +86,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init) if (virt->ops && virt->ops->req_full_gpu) { r = virt->ops->req_full_gpu(adev, init); - if (r) + if (r) { + adev->no_hw_access = true; return r; + } adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; } From 6dd40d964e673102c95f78086beec6ed5f82aab4 Mon Sep 17 00:00:00 2001 From: "Luis Henriques (SUSE)" Date: Wed, 29 May 2024 10:20:30 +0100 Subject: [PATCH 224/374] ext4: fix possible tid_t sequence overflows [ Upstream commit 63469662cc45d41705f14b4648481d5d29cf5999 ] In the fast commit code there are a few places where tid_t variables are being compared without taking into account the fact that these sequence numbers may wrap. Fix this issue by using the helper functions tid_gt() and tid_geq(). 
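The helpers reduce to serial-number arithmetic on the unsigned transaction id: interpret the difference as a signed value so ordering survives a wrap, as long as the two tids are less than 2^31 apart. A stand-alone model (mirroring the idea behind the jbd2 tid_gt()/tid_geq() helpers, not the exact kernel code):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint32_t tid_t;

  static int tid_gt(tid_t x, tid_t y)  { return (int32_t)(x - y) > 0; }
  static int tid_geq(tid_t x, tid_t y) { return (int32_t)(x - y) >= 0; }

  int main(void)
  {
          tid_t newer = 5, older = 0xfffffff0u;   /* counter wrapped past 0 */

          printf("naive:     %d\n", newer > older);           /* 0, wrong  */
          printf("wrap-safe: %d\n", tid_gt(newer, older));    /* 1         */
          printf("geq:       %d\n", tid_geq(newer, newer));   /* 1         */
          return 0;
  }
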
Signed-off-by: Luis Henriques (SUSE) Reviewed-by: Jan Kara Reviewed-by: Harshad Shirwadkar Link: https://patch.msgid.link/20240529092030.9557-3-luis.henriques@linux.dev Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/fast_commit.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index d3a67bc06d10..3926a05eceee 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -353,7 +353,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl read_unlock(&sbi->s_journal->j_state_lock); } spin_lock(&sbi->s_fc_lock); - if (sbi->s_fc_ineligible_tid < tid) + if (tid_gt(tid, sbi->s_fc_ineligible_tid)) sbi->s_fc_ineligible_tid = tid; spin_unlock(&sbi->s_fc_lock); WARN_ON(reason >= EXT4_FC_REASON_MAX); @@ -1213,7 +1213,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid) if (ret == -EALREADY) { /* There was an ongoing commit, check if we need to restart */ if (atomic_read(&sbi->s_fc_subtid) <= subtid && - commit_tid > journal->j_commit_sequence) + tid_gt(commit_tid, journal->j_commit_sequence)) goto restart_fc; ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0, commit_tid); @@ -1288,7 +1288,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) list_del_init(&iter->i_fc_list); ext4_clear_inode_state(&iter->vfs_inode, EXT4_STATE_FC_COMMITTING); - if (iter->i_sync_tid <= tid) + if (tid_geq(tid, iter->i_sync_tid)) ext4_fc_reset_inode(&iter->vfs_inode); /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */ smp_mb(); @@ -1319,7 +1319,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) list_splice_init(&sbi->s_fc_q[FC_Q_STAGING], &sbi->s_fc_q[FC_Q_MAIN]); - if (tid >= sbi->s_fc_ineligible_tid) { + if (tid_geq(tid, sbi->s_fc_ineligible_tid)) { sbi->s_fc_ineligible_tid = 0; ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); } From 926107e6e3f6c7b9c23eeeb7693e4800716eda85 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Thu, 20 Jun 2024 15:24:05 +0800 Subject: [PATCH 225/374] jbd2: avoid mount failed when commit block is partial submitted [ Upstream commit 0bab8db4152c4a2185a1367db09cc402bdc62d5e ] We encountered a problem that the file system could not be mounted in the power-off scenario. The analysis of the file system mirror shows that only part of the data is written to the last commit block. The valid data of the commit block is concentrated in the first sector. However, the data of the entire block is involved in the checksum calculation. For different hardware, the minimum atomic unit may be different. If the checksum of a committed block is incorrect, clear the data except the 'commit_header' and then calculate the checksum. If the checkusm is correct, it is considered that the block is partially committed, Then continue to replay journal. 
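In outline, the added verifier recomputes the checksum over a zero-filled copy of the block that keeps only the commit header, so stale bytes from a torn write cannot affect the result. A sketch with a generic checksum callback (the real code uses struct commit_header and jbd2_chksum(); byte-order conversion is omitted here):

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  /* Stand-in for the on-disk commit header layout. */
  struct hdr {
          uint32_t magic;
          uint32_t blocktype;
          uint32_t sequence;
          uint32_t chksum;
  };

  typedef uint32_t (*csum_fn)(uint32_t seed, const void *buf, size_t len);

  /* Copy only the header into a zeroed block-sized buffer, clear the
   * stored checksum field and recompute.  A match means the header is
   * intact and the original checksum covered header plus zero padding,
   * so the mismatch on the full block came from a torn write and the
   * transaction can still be treated as committed. */
  static int commit_csum_partial_ok(const void *blk, size_t blocksize,
                                    uint32_t seed, csum_fn csum)
  {
          struct hdr *h;
          uint32_t provided, calculated;
          void *tmp = calloc(1, blocksize);

          if (!tmp)
                  return 0;
          memcpy(tmp, blk, sizeof(struct hdr));
          h = tmp;
          provided = h->chksum;
          h->chksum = 0;
          calculated = csum(seed, tmp, blocksize);
          free(tmp);
          return provided == calculated;
  }
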
Signed-off-by: Ye Bin Reviewed-by: Jan Kara Link: https://patch.msgid.link/20240620072405.3533701-1-yebin@huaweicloud.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/jbd2/recovery.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 1f7664984d6e..0d14b5f39be6 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -443,6 +443,27 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) return provided == cpu_to_be32(calculated); } +static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf) +{ + struct commit_header *h; + __be32 provided; + __u32 calculated; + void *tmpbuf; + + tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL); + if (!tmpbuf) + return false; + + memcpy(tmpbuf, buf, sizeof(struct commit_header)); + h = tmpbuf; + provided = h->h_chksum[0]; + h->h_chksum[0] = 0; + calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize); + kfree(tmpbuf); + + return provided == cpu_to_be32(calculated); +} + static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, journal_block_tag3_t *tag3, void *buf, __u32 sequence) @@ -810,6 +831,13 @@ static int do_one_pass(journal_t *journal, if (pass == PASS_SCAN && !jbd2_commit_block_csum_verify(journal, bh->b_data)) { + if (jbd2_commit_block_csum_verify_partial( + journal, + bh->b_data)) { + pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n", + next_commit_ID, next_log_block); + goto chksum_ok; + } chksum_error: if (commit_time < last_trans_commit_time) goto ignore_crc_mismatch; @@ -824,6 +852,7 @@ static int do_one_pass(journal_t *journal, } } if (pass == PASS_SCAN) { + chksum_ok: last_trans_commit_time = commit_time; head_block = next_log_block; } @@ -843,6 +872,7 @@ static int do_one_pass(journal_t *journal, next_log_block); need_check_commit_time = true; } + /* If we aren't in the REVOKE pass, then we can * just skip over this block. */ if (pass != PASS_REVOKE) { From a90abf931046460902427f0fe8ad2b652c070192 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 20 Jun 2024 17:28:55 +0800 Subject: [PATCH 226/374] dma-mapping: benchmark: Don't starve others when doing the test [ Upstream commit 54624acf8843375a6de3717ac18df3b5104c39c5 ] The test thread will start N benchmark kthreads and then schedule out until the test time finished and notify the benchmark kthreads to stop. The benchmark kthreads will keep running until notified to stop. There's a problem with current implementation when the benchmark kthreads number is equal to the CPUs on a non-preemptible kernel: since the scheduler will balance the kthreads across the CPUs and when the test time's out the test thread won't get a chance to be scheduled on any CPU then cannot notify the benchmark kthreads to stop. This can be easily reproduced on a VM (simulated with 16 CPUs) with PREEMPT_VOLUNTARY: estuary:/mnt$ ./dma_map_benchmark -t 16 -s 1 rcu: INFO: rcu_sched self-detected stall on CPU rcu: 10-...!: (5221 ticks this GP) idle=ed24/1/0x4000000000000000 softirq=142/142 fqs=0 rcu: (t=5254 jiffies g=-559 q=45 ncpus=16) rcu: rcu_sched kthread starved for 5255 jiffies! g-559 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x0 ->cpu=12 rcu: Unless rcu_sched kthread gets sufficient CPU time, OOM is now expected behavior. 
rcu: RCU grace-period kthread stack dump: task:rcu_sched state:R running task stack:0 pid:16 tgid:16 ppid:2 flags:0x00000008 Call trace __switch_to+0xec/0x138 __schedule+0x2f8/0x1080 schedule+0x30/0x130 schedule_timeout+0xa0/0x188 rcu_gp_fqs_loop+0x128/0x528 rcu_gp_kthread+0x1c8/0x208 kthread+0xec/0xf8 ret_from_fork+0x10/0x20 Sending NMI from CPU 10 to CPUs 0: NMI backtrace for cpu 0 CPU: 0 PID: 332 Comm: dma-map-benchma Not tainted 6.10.0-rc1-vanilla-LSE #8 Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 pstate: 20400005 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : arm_smmu_cmdq_issue_cmdlist+0x218/0x730 lr : arm_smmu_cmdq_issue_cmdlist+0x488/0x730 sp : ffff80008748b630 x29: ffff80008748b630 x28: 0000000000000000 x27: ffff80008748b780 x26: 0000000000000000 x25: 000000000000bc70 x24: 000000000001bc70 x23: ffff0000c12af080 x22: 0000000000010000 x21: 000000000000ffff x20: ffff80008748b700 x19: ffff0000c12af0c0 x18: 0000000000010000 x17: 0000000000000001 x16: 0000000000000040 x15: ffffffffffffffff x14: 0001ffffffffffff x13: 000000000000ffff x12: 00000000000002f1 x11: 000000000001ffff x10: 0000000000000031 x9 : ffff800080b6b0b8 x8 : ffff0000c2a48000 x7 : 000000000001bc71 x6 : 0001800000000000 x5 : 00000000000002f1 x4 : 01ffffffffffffff x3 : 000000000009aaf1 x2 : 0000000000000018 x1 : 000000000000000f x0 : ffff0000c12af18c Call trace: arm_smmu_cmdq_issue_cmdlist+0x218/0x730 __arm_smmu_tlb_inv_range+0xe0/0x1a8 arm_smmu_iotlb_sync+0xc0/0x128 __iommu_dma_unmap+0x248/0x320 iommu_dma_unmap_page+0x5c/0xe8 dma_unmap_page_attrs+0x38/0x1d0 map_benchmark_thread+0x118/0x2c0 kthread+0xec/0xf8 ret_from_fork+0x10/0x20 Solve this by adding scheduling point in the kthread loop, so if there're other threads in the system they may have a chance to run, especially the thread to notify the test end. However this may degrade the test concurrency so it's recommended to run this on an idle system. Signed-off-by: Yicong Yang Acked-by: Barry Song Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- kernel/dma/map_benchmark.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index 4950e0b622b1..cc19a3efea89 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data) atomic64_add(map_sq, &map->sum_sq_map); atomic64_add(unmap_sq, &map->sum_sq_unmap); atomic64_inc(&map->loops); + + /* + * We may test for a long time so periodically check whether + * we need to schedule to avoid starving the others. Otherwise + * we may hangup the kernel in a non-preemptible kernel when + * the test kthreads number >= CPU number, the test kthreads + * will run endless on every CPU since the thread resposible + * for notifying the kthread stop (in do_map_benchmark()) + * could not be scheduled. + * + * Note this may degrade the test concurrency since the test + * threads may need to share the CPU time with other load + * in the system. So it's recommended to run this benchmark + * on an idle system. + */ + cond_resched(); } out: From c16916dd6c16fa7e13ca3923eb6b9f50d848ad03 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 3 Jul 2024 09:24:09 +0200 Subject: [PATCH 227/374] wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id() [ Upstream commit c145eea2f75ff7949392aebecf7ef0a81c1f6c14 ] mwifiex_get_priv_by_id() returns the priv pointer corresponding to the bss_num and bss_type, but without checking if the priv is actually currently in use. 
Unused priv pointers do not have a wiphy attached to them which can lead to NULL pointer dereferences further down the callstack. Fix this by returning only used priv pointers which have priv->bss_mode set to something else than NL80211_IFTYPE_UNSPECIFIED. Said NULL pointer dereference happened when an Accesspoint was started with wpa_supplicant -i mlan0 with this config: network={ ssid="somessid" mode=2 frequency=2412 key_mgmt=WPA-PSK WPA-PSK-SHA256 proto=RSN group=CCMP pairwise=CCMP psk="12345678" } When waiting for the AP to be established, interrupting wpa_supplicant with and starting it again this happens: | Unable to handle kernel NULL pointer dereference at virtual address 0000000000000140 | Mem abort info: | ESR = 0x0000000096000004 | EC = 0x25: DABT (current EL), IL = 32 bits | SET = 0, FnV = 0 | EA = 0, S1PTW = 0 | FSC = 0x04: level 0 translation fault | Data abort info: | ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 | CM = 0, WnR = 0, TnD = 0, TagAccess = 0 | GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 | user pgtable: 4k pages, 48-bit VAs, pgdp=0000000046d96000 | [0000000000000140] pgd=0000000000000000, p4d=0000000000000000 | Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP | Modules linked in: caam_jr caamhash_desc spidev caamalg_desc crypto_engine authenc libdes mwifiex_sdio +mwifiex crct10dif_ce cdc_acm onboard_usb_hub fsl_imx8_ddr_perf imx8m_ddrc rtc_ds1307 lm75 rtc_snvs +imx_sdma caam imx8mm_thermal spi_imx error imx_cpufreq_dt fuse ip_tables x_tables ipv6 | CPU: 0 PID: 8 Comm: kworker/0:1 Not tainted 6.9.0-00007-g937242013fce-dirty #18 | Hardware name: somemachine (DT) | Workqueue: events sdio_irq_work | pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : mwifiex_get_cfp+0xd8/0x15c [mwifiex] | lr : mwifiex_get_cfp+0x34/0x15c [mwifiex] | sp : ffff8000818b3a70 | x29: ffff8000818b3a70 x28: ffff000006bfd8a5 x27: 0000000000000004 | x26: 000000000000002c x25: 0000000000001511 x24: 0000000002e86bc9 | x23: ffff000006bfd996 x22: 0000000000000004 x21: ffff000007bec000 | x20: 000000000000002c x19: 0000000000000000 x18: 0000000000000000 | x17: 000000040044ffff x16: 00500072b5503510 x15: ccc283740681e517 | x14: 0201000101006d15 x13: 0000000002e8ff43 x12: 002c01000000ffb1 | x11: 0100000000000000 x10: 02e8ff43002c0100 x9 : 0000ffb100100157 | x8 : ffff000003d20000 x7 : 00000000000002f1 x6 : 00000000ffffe124 | x5 : 0000000000000001 x4 : 0000000000000003 x3 : 0000000000000000 | x2 : 0000000000000000 x1 : 0001000000011001 x0 : 0000000000000000 | Call trace: | mwifiex_get_cfp+0xd8/0x15c [mwifiex] | mwifiex_parse_single_response_buf+0x1d0/0x504 [mwifiex] | mwifiex_handle_event_ext_scan_report+0x19c/0x2f8 [mwifiex] | mwifiex_process_sta_event+0x298/0xf0c [mwifiex] | mwifiex_process_event+0x110/0x238 [mwifiex] | mwifiex_main_process+0x428/0xa44 [mwifiex] | mwifiex_sdio_interrupt+0x64/0x12c [mwifiex_sdio] | process_sdio_pending_irqs+0x64/0x1b8 | sdio_irq_work+0x4c/0x7c | process_one_work+0x148/0x2a0 | worker_thread+0x2fc/0x40c | kthread+0x110/0x114 | ret_from_fork+0x10/0x20 | Code: a94153f3 a8c37bfd d50323bf d65f03c0 (f940a000) | ---[ end trace 0000000000000000 ]--- Signed-off-by: Sascha Hauer Acked-by: Brian Norris Reviewed-by: Francesco Dolcini Signed-off-by: Kalle Valo Link: https://patch.msgid.link/20240703072409.556618-1-s.hauer@pengutronix.de Signed-off-by: Sasha Levin --- drivers/net/wireless/marvell/mwifiex/main.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 
175882485a19..c5164ae41b54 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { + if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) + continue; + if ((adapter->priv[i]->bss_num == bss_num) && (adapter->priv[i]->bss_type == bss_type)) break; From 412db603a1d2327c71ef7ec5eeeebc605d9e0dbb Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 8 Jul 2024 12:33:34 -0700 Subject: [PATCH 228/374] perf/x86/intel: Hide Topdown metrics events if the feature is not enumerated [ Upstream commit 556a7c039a52c21da33eaae9269984a1ef59189b ] The below error is observed on Ice Lake VM. $ perf stat Error: The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (slots). /bin/dmesg | grep -i perf may provide additional information. In a virtualization env, the Topdown metrics and the slots event haven't been supported yet. The guest CPUID doesn't enumerate them. However, the current kernel unconditionally exposes the slots event and the Topdown metrics events to sysfs, which misleads the perf tool and triggers the error. Hide the perf-metrics topdown events and the slots event if the perf-metrics feature is not enumerated. The big core of a hybrid platform can also supports the perf-metrics feature. Fix the hybrid platform as well. Closes: https://lore.kernel.org/lkml/CAM9d7cj8z+ryyzUHR+P1Dcpot2jjW+Qcc4CPQpfafTXN=LEU0Q@mail.gmail.com/ Reported-by: Dongli Zhang Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Dongli Zhang Link: https://lkml.kernel.org/r/20240708193336.1192217-2-kan.liang@linux.intel.com Signed-off-by: Sasha Levin --- arch/x86/events/intel/core.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 05ec651663cb..dcac96133cb6 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5733,8 +5733,22 @@ exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) return x86_pmu.version >= 2 ? attr->mode : 0; } +static umode_t +td_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + /* + * Hide the perf metrics topdown events + * if the feature is not enumerated. + */ + if (x86_pmu.num_topdown_events) + return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0; + + return attr->mode; +} + static struct attribute_group group_events_td = { .name = "events", + .is_visible = td_is_visible, }; static struct attribute_group group_events_mem = { @@ -5936,9 +5950,27 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj, return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0; } +static umode_t hybrid_td_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + + if (!is_attr_for_this_pmu(kobj, attr)) + return 0; + + + /* Only the big core supports perf metrics */ + if (pmu->pmu_type == hybrid_big) + return pmu->intel_cap.perf_metrics ? 
attr->mode : 0; + + return attr->mode; +} + static struct attribute_group hybrid_group_events_td = { .name = "events", - .is_visible = hybrid_events_is_visible, + .is_visible = hybrid_td_is_visible, }; static struct attribute_group hybrid_group_events_mem = { From bc89b8a2c6e2b3c2bf8139d9d11247999284012b Mon Sep 17 00:00:00 2001 From: Mrinmay Sarkar Date: Mon, 11 Mar 2024 19:41:35 +0530 Subject: [PATCH 229/374] PCI: qcom: Override NO_SNOOP attribute for SA8775P RC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1d648bf79d4dca909f242b1a0cdc458e4f9d0253 ] Due to some hardware changes, SA8775P has set the NO_SNOOP attribute in its TLP for all the PCIe controllers. NO_SNOOP attribute when set, the requester is indicating that no cache coherency issue exist for the addressed memory on the endpoint i.e., memory is not cached. But in reality, requester cannot assume this unless there is a complete control/visibility over the addressed memory on the endpoint. And worst case, if the memory is cached on the endpoint, it may lead to memory corruption issues. It should be noted that the caching of memory on the endpoint is not solely dependent on the NO_SNOOP attribute in TLP. So to avoid the corruption, this patch overrides the NO_SNOOP attribute by setting the PCIE_PARF_NO_SNOOP_OVERIDE register. This patch is not needed for other upstream supported platforms since they do not set NO_SNOOP attribute by default. 8775 has IP version 1.34.0 so introduce a new cfg(cfg_1_34_0) for this platform. Assign override_no_snoop flag into struct qcom_pcie_cfg and set it true in cfg_1_34_0 and enable cache snooping if this particular flag is true. Link: https://lore.kernel.org/linux-pci/1710166298-27144-2-git-send-email-quic_msarkar@quicinc.com Signed-off-by: Mrinmay Sarkar Signed-off-by: Krzysztof Wilczyński [bhelgaas: wrap comments to fit in 80 columns] Signed-off-by: Bjorn Helgaas Reviewed-by: Manivannan Sadhasivam Signed-off-by: Sasha Levin --- drivers/pci/controller/dwc/pcie-qcom.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 14772edcf0d3..7fa1fe5a29e3 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -51,6 +51,7 @@ #define PARF_SID_OFFSET 0x234 #define PARF_BDF_TRANSLATE_CFG 0x24c #define PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define PARF_NO_SNOOP_OVERIDE 0x3d4 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 0x2000 #define PARF_BDF_TO_SID_CFG 0x2c00 @@ -118,6 +119,10 @@ /* PARF_LTSSM register fields */ #define LTSSM_EN BIT(8) +/* PARF_NO_SNOOP_OVERIDE register fields */ +#define WR_NO_SNOOP_OVERIDE_EN BIT(1) +#define RD_NO_SNOOP_OVERIDE_EN BIT(3) + /* PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 @@ -231,8 +236,15 @@ struct qcom_pcie_ops { int (*config_sid)(struct qcom_pcie *pcie); }; + /** + * struct qcom_pcie_cfg - Per SoC config struct + * @ops: qcom PCIe ops structure + * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache + * snooping + */ struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; + bool override_no_snoop; bool no_l0s; }; @@ -986,6 +998,12 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) { + const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; + + if (pcie_cfg->override_no_snoop) + writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, + 
pcie->parf + PARF_NO_SNOOP_OVERIDE); + qcom_pcie_clear_aspm_l0s(pcie->pci); qcom_pcie_clear_hpc(pcie->pci); @@ -1366,6 +1384,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = { .ops = &ops_1_9_0, }; +static const struct qcom_pcie_cfg cfg_1_34_0 = { + .ops = &ops_1_9_0, + .override_no_snoop = true, +}; + static const struct qcom_pcie_cfg cfg_2_1_0 = { .ops = &ops_2_1_0, }; @@ -1667,7 +1690,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, - { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0}, + { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0}, { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp }, From 3913cea77d6488a420550ba4cc8ad1851c6cdcf6 Mon Sep 17 00:00:00 2001 From: Umang Jain Date: Wed, 3 Jul 2024 18:40:51 +0530 Subject: [PATCH 230/374] staging: vchiq_core: Bubble up wait_event_interruptible() return value [ Upstream commit c22502cb84d4c963f754e6d943d3133cfa80ba97 ] wait_event_interruptible() returns if the condition evaluates to true it receives a signal. However, the current code always assume that the wait_event_interruptible() returns only when the event is fired. This should not be the case as wait_event_interruptible() can return on receiving a signal (with -ERESTARTSYS as return value). We should consider this and bubble up the return value of wait_event_interruptible() to exactly know if the wait has failed and error out. This will also help to properly stop kthreads in the subsequent patch. Meanwhile at it, remote_wait_event() is modified to return 0 on success, and an error code (from wait_event_interruptible()) on failure. The return value is now checked for remote_wait_event() calls. Signed-off-by: Umang Jain Tested-by: Stefan Wahren Link: https://lore.kernel.org/r/20240703131052.597443-2-umang.jain@ideasonboard.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- .../interface/vchiq_arm/vchiq_core.c | 31 ++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index df3af821f218..fb1907414cc1 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -501,16 +501,21 @@ remote_event_create(wait_queue_head_t *wq, struct remote_event *event) * routines where switched to the "interruptible" family of functions, as the * former was deemed unjustified and the use "killable" set all VCHIQ's * threads in D state. + * + * Returns: 0 on success, a negative error code on failure */ static inline int remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) { + int ret = 0; + if (!event->fired) { event->armed = 1; dsb(sy); - if (wait_event_interruptible(*wq, event->fired)) { + ret = wait_event_interruptible(*wq, event->fired); + if (ret) { event->armed = 0; - return 0; + return ret; } event->armed = 0; /* Ensure that the peer sees that we are not waiting (armed == 0). 
*/ @@ -518,7 +523,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) } event->fired = 0; - return 1; + return ret; } /* @@ -1140,6 +1145,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, struct vchiq_header *header; ssize_t callback_result; int svc_fourcc; + int ret; local = state->local; @@ -1147,7 +1153,9 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, mutex_lock_killable(&state->sync_mutex)) return -EAGAIN; - remote_event_wait(&state->sync_release_event, &local->sync_release); + ret = remote_event_wait(&state->sync_release_event, &local->sync_release); + if (ret) + return ret; /* Ensure that reads don't overtake the remote_event_wait. */ rmb(); @@ -1929,13 +1937,16 @@ slot_handler_func(void *v) { struct vchiq_state *state = v; struct vchiq_shared_state *local = state->local; + int ret; DEBUG_INITIALISE(local); while (1) { DEBUG_COUNT(SLOT_HANDLER_COUNT); DEBUG_TRACE(SLOT_HANDLER_LINE); - remote_event_wait(&state->trigger_event, &local->trigger); + ret = remote_event_wait(&state->trigger_event, &local->trigger); + if (ret) + return ret; /* Ensure that reads don't overtake the remote_event_wait. */ rmb(); @@ -1966,6 +1977,7 @@ recycle_func(void *v) struct vchiq_shared_state *local = state->local; u32 *found; size_t length; + int ret; length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES); @@ -1975,7 +1987,9 @@ recycle_func(void *v) return -ENOMEM; while (1) { - remote_event_wait(&state->recycle_event, &local->recycle); + ret = remote_event_wait(&state->recycle_event, &local->recycle); + if (ret) + return ret; process_free_queue(state, found, length); } @@ -1992,6 +2006,7 @@ sync_func(void *v) (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, state->remote->slot_sync); int svc_fourcc; + int ret; while (1) { struct vchiq_service *service; @@ -1999,7 +2014,9 @@ sync_func(void *v) int type; unsigned int localport, remoteport; - remote_event_wait(&state->sync_trigger_event, &local->sync_trigger); + ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger); + if (ret) + return ret; /* Ensure that reads don't overtake the remote_event_wait. 
*/ rmb(); From 89827a4de802765b1ebb401fc1e73a90108c7520 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 28 Jun 2024 13:11:11 -0300 Subject: [PATCH 231/374] iommufd: Require drivers to supply the cache_invalidate_user ops [ Upstream commit a11dda723c6493bb1853bbc61c093377f96e2d47 ] If drivers don't do this then iommufd will oops invalidation ioctls with something like: Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 Mem abort info: ESR = 0x0000000086000004 EC = 0x21: IABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x04: level 0 translation fault user pgtable: 4k pages, 48-bit VAs, pgdp=0000000101059000 [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 Internal error: Oops: 0000000086000004 [#1] PREEMPT SMP Modules linked in: CPU: 2 PID: 371 Comm: qemu-system-aar Not tainted 6.8.0-rc7-gde77230ac23a #9 Hardware name: linux,dummy-virt (DT) pstate: 81400809 (Nzcv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=-c) pc : 0x0 lr : iommufd_hwpt_invalidate+0xa4/0x204 sp : ffff800080f3bcc0 x29: ffff800080f3bcf0 x28: ffff0000c369b300 x27: 0000000000000000 x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 x23: 0000000000000000 x22: 00000000c1e334a0 x21: ffff0000c1e334a0 x20: ffff800080f3bd38 x19: ffff800080f3bd58 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffff8240d6d8 x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000 x8 : 0000001000000002 x7 : 0000fffeac1ec950 x6 : 0000000000000000 x5 : ffff800080f3bd78 x4 : 0000000000000003 x3 : 0000000000000002 x2 : 0000000000000000 x1 : ffff800080f3bcc8 x0 : ffff0000c6034d80 Call trace: 0x0 iommufd_fops_ioctl+0x154/0x274 __arm64_sys_ioctl+0xac/0xf0 invoke_syscall+0x48/0x110 el0_svc_common.constprop.0+0x40/0xe0 do_el0_svc+0x1c/0x28 el0_svc+0x34/0xb4 el0t_64_sync_handler+0x120/0x12c el0t_64_sync+0x190/0x194 All existing drivers implement this op for nesting, this is mostly a bisection aid. Fixes: 8c6eabae3807 ("iommufd: Add IOMMU_HWPT_INVALIDATE") Link: https://lore.kernel.org/r/0-v1-e153859bd707+61-iommufd_check_ops_jgg@nvidia.com Reviewed-by: Nicolin Chen Reviewed-by: Yi Liu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/iommu/iommufd/hw_pagetable.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 33d142f8057d..a9f1fe44c4c0 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -236,7 +236,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, } hwpt->domain->owner = ops; - if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) { + if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED || + !hwpt->domain->ops->cache_invalidate_user)) { rc = -EINVAL; goto out_abort; } From 9cd15511de7c619bbd0f54bb3f28e6e720ded5d6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 10 Jul 2024 16:16:31 +0200 Subject: [PATCH 232/374] bpf: Remove tst_run from lwt_seg6local_prog_ops. [ Upstream commit c13fda93aca118b8e5cd202e339046728ee7dddb ] The syzbot reported that the lwt_seg6 related BPF ops can be invoked via bpf_test_run() without without entering input_action_end_bpf() first. Martin KaFai Lau said that self test for BPF_PROG_TYPE_LWT_SEG6LOCAL probably didn't work since it was introduced in commit 04d4b274e2a ("ipv6: sr: Add seg6local action End.BPF"). 
The reason is that the per-CPU variable seg6_bpf_srh_states::srh is never assigned in the self-test case, but each BPF function expects it. Remove test_run for BPF_PROG_TYPE_LWT_SEG6LOCAL. Suggested-by: Martin KaFai Lau Reported-by: syzbot+608a2acde8c5a101d07d@syzkaller.appspotmail.com Fixes: d1542d4ae4df ("seg6: Use nested-BH locking for seg6_bpf_srh_states.") Fixes: 004d4b274e2a ("ipv6: sr: Add seg6local action End.BPF") Signed-off-by: Sebastian Andrzej Siewior Acked-by: Daniel Borkmann Link: https://lore.kernel.org/r/20240710141631.FbmHcQaX@linutronix.de Signed-off-by: Martin KaFai Lau Signed-off-by: Sasha Levin --- net/core/filter.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index ab0455c64e49..55b1d9de2334 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11047,7 +11047,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { }; const struct bpf_prog_ops lwt_seg6local_prog_ops = { - .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops cg_sock_verifier_ops = { From 16c317f16e387d246fe276869a96bb19b02e3008 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 3 Jul 2024 13:16:03 +0200 Subject: [PATCH 233/374] watchdog: imx7ulp_wdt: keep already running watchdog enabled [ Upstream commit b771d14f417e9d8030ab000b3341cf71266be90e ] When the bootloader has enabled the watchdog before the kernel starts, keep it enabled during initialization. Otherwise the time between watchdog probing and userspace taking over the watchdog won't be covered by the watchdog. When keeping the watchdog enabled, inform the kernel by setting the WDOG_HW_RUNNING flag so that the periodic watchdog feeder is started when desired. Signed-off-by: Sascha Hauer Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20240703111603.1096424-1-s.hauer@pengutronix.de Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Sasha Levin --- drivers/watchdog/imx7ulp_wdt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index b21d7a74a42d..94914a22daff 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -290,6 +290,11 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout if (wdt->ext_reset) val |= WDOG_CS_INT_EN; + if (readl(wdt->base + WDOG_CS) & WDOG_CS_EN) { + set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); + val |= WDOG_CS_EN; + } + do { ret = _imx7ulp_wdt_init(wdt, timeout, val); toval = readl(wdt->base + WDOG_TOVAL); From 0d0d06fbc8f20d5ccda58b8892fe5154f3d6f233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 19 Jan 2024 14:57:29 +0100 Subject: [PATCH 234/374] drm/amdgpu: reject gang submit on reserved VMIDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 320debca1ba3a81c87247eac84eff976ead09ee0 ] A gang submit won't work if the VMID is reserved and we can't flush out VM changes from multiple engines at the same time.
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 15 ++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 1 + 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 936c98a13a24..6dfdff58bffd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) unsigned int i; int r; + /* + * We can't use gang submit on with reserved VMIDs when the VM changes + * can't be invalidated by more than one engine at the same time. + */ + if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) { + for (i = 0; i < p->gang_size; ++i) { + struct drm_sched_entity *entity = p->entities[i]; + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct amdgpu_ring *ring = to_amdgpu_ring(sched); + + if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub)) + return -EINVAL; + } + } + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3d7fcdeaf8cf..e8f6e4dbc5a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -406,7 +406,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !idle) goto error; - if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { + if (amdgpu_vmid_uses_reserved(vm, vmhub)) { r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; @@ -456,6 +456,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, return r; } +/* + * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID + * @vm: the VM to check + * @vmhub: the VMHUB which will be used + * + * Returns: True if the VM will use a reserved VMID. + */ +bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) +{ + return vm->reserved_vmid[vmhub] || + (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0))); +} + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, unsigned vmhub) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index fa8c42c83d5d..240fa6751260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); +bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, unsigned vmhub); void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, From 5e1a3e5ddf6a23aae27c0b489b0e6a4ccbac572b Mon Sep 17 00:00:00 2001 From: Zqiang Date: Thu, 4 Jul 2024 14:52:13 +0800 Subject: [PATCH 235/374] smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu() [ Upstream commit 77aeb1b685f9db73d276bad4bb30d48505a6fd23 ] For CONFIG_DEBUG_OBJECTS_WORK=y kernels sscs.work defined by INIT_WORK_ONSTACK() is initialized by debug_object_init_on_stack() for the debug check in __init_work() to work correctly. But this lacks the counterpart to remove the tracked object from debug objects again, which will cause a debug object warning once the stack is freed. 
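For illustration, the usual lifetime of an on-stack work item pairs INIT_WORK_ONSTACK() with destroy_work_on_stack(); the sketch below is a minimal example of that pattern, not the kernel/smp.c code itself, and my_handler() is a hypothetical work function:

    struct work_struct work;
    int cpu = 0;                            /* any online CPU would do for the sketch */

    INIT_WORK_ONSTACK(&work, my_handler);   /* registers the object with debugobjects */
    queue_work_on(cpu, system_wq, &work);
    flush_work(&work);                      /* wait until my_handler() has run */
    destroy_work_on_stack(&work);           /* remove tracking before 'work' goes out of scope */

Without the final call, the debug objects code keeps tracking a work item whose stack slot is about to be reused, which is the warning described above.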
Add the missing destroy_work_on_stack() invocation to cure that. [ tglx: Massaged changelog ] Signed-off-by: Zqiang Signed-off-by: Thomas Gleixner Tested-by: Paul E. McKenney Link: https://lore.kernel.org/r/20240704065213.13559-1-qiang.zhang1211@gmail.com Signed-off-by: Sasha Levin --- kernel/smp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/smp.c b/kernel/smp.c index f085ebcdf9e7..af9b2d0736c8 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -1119,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) queue_work_on(cpu, system_wq, &sscs.work); wait_for_completion(&sscs.done); + destroy_work_on_stack(&sscs.work); return sscs.ret; } From b5a531dd2f7257ee248664f60c97f4eaa12837eb Mon Sep 17 00:00:00 2001 From: Konstantin Komarov Date: Mon, 17 Jun 2024 14:53:57 +0300 Subject: [PATCH 236/374] fs/ntfs3: Check more cases when directory is corrupted [ Upstream commit 744375343662058cbfda96d871786e5a5cbe1947 ] Mark ntfs dirty in this case. Rename ntfs_filldir to ntfs_dir_emit. Signed-off-by: Konstantin Komarov Signed-off-by: Sasha Levin --- fs/ntfs3/dir.c | 52 +++++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c index 858efe255f6f..1ec09f2fca64 100644 --- a/fs/ntfs3/dir.c +++ b/fs/ntfs3/dir.c @@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni, return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode; } -static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, - const struct NTFS_DE *e, u8 *name, - struct dir_context *ctx) +/* + * returns false if 'ctx' if full + */ +static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, + struct ntfs_inode *ni, const struct NTFS_DE *e, + u8 *name, struct dir_context *ctx) { const struct ATTR_FILE_NAME *fname; unsigned long ino; @@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, fname = Add2Ptr(e, sizeof(struct NTFS_DE)); if (fname->type == FILE_NAME_DOS) - return 0; + return true; if (!mi_is_ref(&ni->mi, &fname->home)) - return 0; + return true; ino = ino_get(&e->ref); if (ino == MFT_REC_ROOT) - return 0; + return true; /* Skip meta files. Unless option to show metafiles is set. */ if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino)) - return 0; + return true; if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN)) - return 0; + return true; name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name, PATH_MAX); if (name_len <= 0) { ntfs_warn(sbi->sb, "failed to convert name for inode %lx.", ino); - return 0; + return true; } /* @@ -336,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, } } - return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); + return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); } /* * ntfs_read_hdr - Helper function for ntfs_readdir(). + * + * returns 0 if ok. + * returns -EINVAL if directory is corrupted. + * returns +1 if 'ctx' is full. 
*/ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, const struct INDEX_HDR *hdr, u64 vbo, u64 pos, u8 *name, struct dir_context *ctx) { - int err; const struct NTFS_DE *e; u32 e_size; u32 end = le32_to_cpu(hdr->used); @@ -354,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, for (;; off += e_size) { if (off + sizeof(struct NTFS_DE) > end) - return -1; + return -EINVAL; e = Add2Ptr(hdr, off); e_size = le16_to_cpu(e->size); if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) - return -1; + return -EINVAL; if (de_is_last(e)) return 0; @@ -369,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, continue; if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME) - return -1; + return -EINVAL; ctx->pos = vbo + off; /* Submit the name to the filldir callback. */ - err = ntfs_filldir(sbi, ni, e, name, ctx); - if (err) - return err; + if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) { + /* ctx is full. */ + return +1; + } } } @@ -475,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx) vbo = (u64)bit << index_bits; if (vbo >= i_size) { - ntfs_inode_err(dir, "Looks like your dir is corrupt"); - ctx->pos = eod; err = -EINVAL; goto out; } @@ -499,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx) __putname(name); put_indx_node(node); - if (err == -ENOENT) { + if (err == 1) { + /* 'ctx' is full. */ + err = 0; + } else if (err == -ENOENT) { err = 0; ctx->pos = pos; + } else if (err < 0) { + if (err == -EINVAL) + ntfs_inode_err(dir, "directory corrupted"); + ctx->pos = eod; } return err; From ab6a217ac57741ec9649457d70bbc43caf0a922d Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 19 Apr 2024 14:29:32 +0930 Subject: [PATCH 237/374] btrfs: slightly loosen the requirement for qgroup removal [ Upstream commit a776bf5f3c2300cfdf8a195663460b1793ac9847 ] [BUG] Currently if one is utilizing "qgroups/drop_subtree_threshold" sysfs, and a snapshot with level higher than that value is dropped, we will not be able to delete the qgroup until next qgroup rescan: uuid=ffffffff-eeee-dddd-cccc-000000000000 wipefs -fa $dev mkfs.btrfs -f $dev -O quota -s 4k -n 4k -U $uuid mount $dev $mnt btrfs subvolume create $mnt/subv1/ for (( i = 0; i < 1024; i++ )); do xfs_io -f -c "pwrite 0 2k" $mnt/subv1/file_$i > /dev/null done sync btrfs subvolume snapshot $mnt/subv1 $mnt/snapshot btrfs quota enable $mnt btrfs quota rescan -w $mnt sync echo 1 > /sys/fs/btrfs/$uuid/qgroups/drop_subtree_threshold btrfs subvolume delete $mnt/snapshot btrfs subvolume sync $mnt btrfs qgroup show -prce --sync $mnt btrfs qgroup destroy 0/257 $mnt umount $mnt The final qgroup removal would fail with the following error: ERROR: unable to destroy quota group: Device or resource busy [CAUSE] The above script would generate a subvolume of level 2, then snapshot it, enable qgroup, set the drop_subtree_threshold, then drop the snapshot. Since the subvolume drop would meet the threshold, qgroup would be marked inconsistent and skip accounting to avoid hanging the system at transaction commit. But currently we do not allow a qgroup with any rfer/excl numbers to be dropped, and this is not really compatible with the new drop_subtree_threshold behavior. [FIX] Only require the strict zero rfer/excl/rfer_cmpr/excl_cmpr for squota mode. This is due to the fact that squota can never go inconsistent, and it can have dropped subvolume but with non-zero qgroup numbers for future accounting. 
For full qgroup mode, we only check if there is a subvolume for it. Reviewed-by: Boris Burkov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/qgroup.c | 87 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 80 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index d4486518414d..24df83177007 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1750,13 +1750,55 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) return ret; } -static bool qgroup_has_usage(struct btrfs_qgroup *qgroup) +/* + * Return 0 if we can not delete the qgroup (not empty or has children etc). + * Return >0 if we can delete the qgroup. + * Return <0 for other errors during tree search. + */ +static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) { - return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 || - qgroup->excl > 0 || qgroup->excl_cmpr > 0 || - qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 || - qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 || - qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0); + struct btrfs_key key; + struct btrfs_path *path; + int ret; + + /* + * Squota would never be inconsistent, but there can still be case + * where a dropped subvolume still has qgroup numbers, and squota + * relies on such qgroup for future accounting. + * + * So for squota, do not allow dropping any non-zero qgroup. + */ + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && + (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr)) + return 0; + + /* For higher level qgroup, we can only delete it if it has no child. */ + if (btrfs_qgroup_level(qgroup->qgroupid)) { + if (!list_empty(&qgroup->members)) + return 0; + return 1; + } + + /* + * For level-0 qgroups, we can only delete it if it has no subvolume + * for it. + * This means even a subvolume is unlinked but not yet fully dropped, + * we can not delete the qgroup. + */ + key.objectid = qgroup->qgroupid; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = -1ULL; + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL); + btrfs_free_path(path); + /* + * The @ret from btrfs_find_root() exactly matches our definition for + * the return value, thus can be returned directly. + */ + return ret; } int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) @@ -1778,7 +1820,10 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) goto out; } - if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) { + ret = can_delete_qgroup(fs_info, qgroup); + if (ret < 0) + goto out; + if (ret == 0) { ret = -EBUSY; goto out; } @@ -1803,6 +1848,34 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) } spin_lock(&fs_info->qgroup_lock); + /* + * Warn on reserved space. The subvolume should has no child nor + * corresponding subvolume. + * Thus its reserved space should all be zero, no matter if qgroup + * is consistent or the mode. + */ + WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] || + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] || + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]); + /* + * The same for rfer/excl numbers, but that's only if our qgroup is + * consistent and if it's in regular qgroup mode. + * For simple mode it's not as accurate thus we can hit non-zero values + * very frequently. 
+ */ + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL && + !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { + if (WARN_ON(qgroup->rfer || qgroup->excl || + qgroup->rfer_cmpr || qgroup->excl_cmpr)) { + btrfs_warn_rl(fs_info, +"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + qgroup->rfer, qgroup->rfer_cmpr, + qgroup->excl, qgroup->excl_cmpr); + qgroup_mark_inconsistent(fs_info); + } + } del_qgroup_rb(fs_info, qgroupid); spin_unlock(&fs_info->qgroup_lock); From 704c359b4093a2af650a20eaa030c435d7c30f91 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 7 May 2024 14:12:10 -0400 Subject: [PATCH 238/374] btrfs: don't BUG_ON on ENOMEM from btrfs_lookup_extent_info() in walk_down_proc() [ Upstream commit a580fb2c3479d993556e1c31b237c9e5be4944a3 ] We handle errors here properly, ENOMEM isn't fatal, return the error. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8bf980123c5c..0effe13ae459 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5339,7 +5339,6 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, &wc->refs[level], &wc->flags[level], NULL); - BUG_ON(ret == -ENOMEM); if (ret) return ret; BUG_ON(wc->refs[level] == 0); From a7dfb14798bc57245b6ca2c7245047762898bfd8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 7 May 2024 14:12:12 -0400 Subject: [PATCH 239/374] btrfs: replace BUG_ON with ASSERT in walk_down_proc() [ Upstream commit 1f9d44c0a12730a24f8bb75c5e1102207413cc9b ] We have a couple of areas where we check to make sure the tree block is locked before looking up or messing with references. This is old code so it has this as BUG_ON(). Convert this to ASSERT() for developers. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0effe13ae459..d107f5809eae 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5333,7 +5333,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, if (lookup_info && ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { - BUG_ON(!path->locks[level]); + ASSERT(path->locks[level]); ret = btrfs_lookup_extent_info(trans, fs_info, eb->start, level, 1, &wc->refs[level], @@ -5357,7 +5357,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, /* wc->stage == UPDATE_BACKREF */ if (!(wc->flags[level] & flag)) { - BUG_ON(!path->locks[level]); + ASSERT(path->locks[level]); ret = btrfs_inc_ref(trans, root, eb, 1); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_dec_ref(trans, root, eb, 0); From 03804641ec2d0da4fa088ad21c88e703d151ce16 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 7 May 2024 14:12:13 -0400 Subject: [PATCH 240/374] btrfs: clean up our handling of refs == 0 in snapshot delete [ Upstream commit b8ccef048354074a548f108e51d0557d6adfd3a3 ] In reada we BUG_ON(refs == 0), which could be unkind since we aren't holding a lock on the extent leaf and thus could get a transient incorrect answer. 
In walk_down_proc we also BUG_ON(refs == 0), which could happen if we have extent tree corruption. Change that to return -EUCLEAN. In do_walk_down() we catch this case and handle it correctly, however we return -EIO, which -EUCLEAN is a more appropriate error code. Finally in walk_up_proc we have the same BUG_ON(refs == 0), so convert that to proper error handling. Also adjust the error message so we can actually do something with the information. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d107f5809eae..96cec4d6b447 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5275,7 +5275,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans, /* We don't care about errors in readahead. */ if (ret < 0) continue; - BUG_ON(refs == 0); + + /* + * This could be racey, it's conceivable that we raced and end + * up with a bogus refs count, if that's the case just skip, if + * we are actually corrupt we will notice when we look up + * everything again with our locks. + */ + if (refs == 0) + continue; if (wc->stage == DROP_REFERENCE) { if (refs == 1) @@ -5341,7 +5349,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, NULL); if (ret) return ret; - BUG_ON(wc->refs[level] == 0); + if (unlikely(wc->refs[level] == 0)) { + btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", + eb->start); + return -EUCLEAN; + } } if (wc->stage == DROP_REFERENCE) { @@ -5514,8 +5526,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, goto out_unlock; if (unlikely(wc->refs[level - 1] == 0)) { - btrfs_err(fs_info, "Missing references."); - ret = -EIO; + btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", + bytenr); + ret = -EUCLEAN; goto out_unlock; } *lookup_info = 0; @@ -5718,7 +5731,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, path->locks[level] = 0; return ret; } - BUG_ON(wc->refs[level] == 0); + if (unlikely(wc->refs[level] == 0)) { + btrfs_tree_unlock_rw(eb, path->locks[level]); + btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0", + eb->start); + return -EUCLEAN; + } if (wc->refs[level] == 1) { btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0; From a7f16a7a709845855cb5a0e080a52bda5873f9de Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 7 May 2024 14:12:15 -0400 Subject: [PATCH 241/374] btrfs: handle errors from btrfs_dec_ref() properly [ Upstream commit 5eb178f373b4f16f3b42d55ff88fc94dd95b93b1 ] In walk_up_proc() we BUG_ON(ret) from btrfs_dec_ref(). This is incorrect, we have proper error handling here, return the error. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 96cec4d6b447..033eb428ffcd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5754,7 +5754,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, eb, 1); else ret = btrfs_dec_ref(trans, root, eb, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + btrfs_abort_transaction(trans, ret); + return ret; + } if (is_fstree(btrfs_root_id(root))) { ret = btrfs_qgroup_trace_leaf_items(trans, eb); if (ret) { From f895db00c65e5d77c437cce946da9ec29dcdf563 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 18 Jun 2024 15:55:16 +0100 Subject: [PATCH 242/374] btrfs: replace BUG_ON() with error handling at update_ref_for_cow() [ Upstream commit b56329a782314fde5b61058e2a25097af7ccb675 ] Instead of a BUG_ON() just return an error, log an error message and abort the transaction in case we find an extent buffer belonging to the relocation tree that doesn't have the full backref flag set. This is unexpected and should never happen (save for bugs or a potential bad memory). Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 8a791b648ac5..f56914507fce 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -462,8 +462,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, } owner = btrfs_header_owner(buf); - BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && - !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); + if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID && + !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) { + btrfs_crit(fs_info, +"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set", + buf->start, btrfs_header_level(buf), + btrfs_root_id(root), refs, flags); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + return ret; + } if (refs > 1) { if ((owner == btrfs_root_id(root) || From ef9a8b73c8b60b27d9db4787e624a3438ffe8428 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 18 Jun 2024 12:15:01 +0100 Subject: [PATCH 243/374] btrfs: don't BUG_ON() when 0 reference count at btrfs_lookup_extent_info() [ Upstream commit 28cb13f29faf6290597b24b728dc3100c019356f ] Instead of doing a BUG_ON() handle the error by returning -EUCLEAN, aborting the transaction and logging an error message. 
Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 033eb428ffcd..55be8a7f0bb1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -173,9 +173,16 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); num_refs = btrfs_extent_refs(leaf, ei); + if (unlikely(num_refs == 0)) { + ret = -EUCLEAN; + btrfs_err(fs_info, + "unexpected zero reference count for extent item (%llu %u %llu)", + key.objectid, key.type, key.offset); + btrfs_abort_transaction(trans, ret); + goto out_free; + } extent_flags = btrfs_extent_flags(leaf, ei); owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]); - BUG_ON(num_refs == 0); } else { num_refs = 0; extent_flags = 0; @@ -205,10 +212,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, goto search_again; } spin_lock(&head->lock); - if (head->extent_op && head->extent_op->update_flags) + if (head->extent_op && head->extent_op->update_flags) { extent_flags |= head->extent_op->flags_to_set; - else - BUG_ON(num_refs == 0); + } else if (unlikely(num_refs == 0)) { + spin_unlock(&head->lock); + mutex_unlock(&head->mutex); + spin_unlock(&delayed_refs->lock); + ret = -EUCLEAN; + btrfs_err(fs_info, + "unexpected zero reference count for extent %llu (%s)", + bytenr, metadata ? "metadata" : "data"); + btrfs_abort_transaction(trans, ret); + goto out_free; + } num_refs += head->ref_mod; spin_unlock(&head->lock); From 101737d8b88dbd4be6010bac398fe810f1950036 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 10 Jul 2024 10:40:42 -0700 Subject: [PATCH 244/374] ethtool: fail closed if we can't get max channel used in indirection tables [ Upstream commit 2899d58462ba868287d6ff3acad3675e7adf934f ] Commit 0d1b7d6c9274 ("bnxt: fix crashes when reducing ring count with active RSS contexts") proves that allowing indirection table to contain channels with out of bounds IDs may lead to crashes. Currently the max channel check in the core gets skipped if driver can't fetch the indirection table or when we can't allocate memory. Both of those conditions should be extremely rare but if they do happen we should try to be safe and fail the channel change. 
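The fail-closed behaviour can be sketched as follows; max_ring_used_by_rss() and read_rss_table() are hypothetical stand-ins used only to illustrate the idea, not the actual ethtool helpers:

    static u32 max_ring_used_by_rss(struct net_device *dev)
    {
            u32 max = 0;

            if (!netif_is_rxfh_configured(dev))
                    return 0;          /* no RSS table configured, nothing to protect */
            if (read_rss_table(dev, &max))
                    return U32_MAX;    /* table unreadable: reject any channel reduction */
            return max;                /* highest ring index referenced by RSS */
    }

Reporting U32_MAX on failure means the later "requested channel count must exceed the largest ring in use" check can never pass, so the change is refused rather than silently allowed.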
Reviewed-by: Jacob Keller Link: https://patch.msgid.link/20240710174043.754664-2-kuba@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ethtool/channels.c | 6 ++---- net/ethtool/common.c | 26 +++++++++++++++----------- net/ethtool/common.h | 2 +- net/ethtool/ioctl.c | 4 +--- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 7b4bbd674bae..cee188da54f8 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -171,11 +171,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info) */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) { - GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings"); + GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use); return -EINVAL; } if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) { diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 6b2a360dcdf0..8a62375ebd1f 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -587,35 +587,39 @@ int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max) return err; } -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max) +u32 ethtool_get_max_rxfh_channel(struct net_device *dev) { struct ethtool_rxfh_param rxfh = {}; - u32 dev_size, current_max = 0; + u32 dev_size, current_max; int ret; + if (!netif_is_rxfh_configured(dev)) + return 0; + if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh) - return -EOPNOTSUPP; + return 0; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) - return -EOPNOTSUPP; + return 0; rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER); if (!rxfh.indir) - return -ENOMEM; + return U32_MAX; ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); - if (ret) - goto out; + if (ret) { + current_max = U32_MAX; + goto out_free; + } + current_max = 0; while (dev_size--) current_max = max(current_max, rxfh.indir[dev_size]); - *max = current_max; - -out: +out_free: kfree(rxfh.indir); - return ret; + return current_max; } int ethtool_check_ops(const struct ethtool_ops *ops) diff --git a/net/ethtool/common.h b/net/ethtool/common.h index 28b8aaaf9bcb..b55705a9ad5a 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -42,7 +42,7 @@ int __ethtool_get_link(struct net_device *dev); bool convert_legacy_settings_to_link_ksettings( struct ethtool_link_ksettings *link_ksettings, const struct ethtool_cmd *legacy_settings); -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max); +u32 ethtool_get_max_rxfh_channel(struct net_device *dev); int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max); int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index f99fd564d0ee..2f5b69d5d4b0 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1928,9 +1928,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, * indirection table/rxnfc settings */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - 
ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_t(u64, max_rxnfc_in_use, max_rxfh_in_use)) return -EINVAL; From 142ef3a1c98ffdad95d96e8a77b6cac53e783009 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 2 Jul 2024 22:29:51 -0700 Subject: [PATCH 245/374] cxl/region: Verify target positions using the ordered target list [ Upstream commit 82a3e3a235633aa0575fac9507d648dd80f3437f ] When a root decoder is configured the interleave target list is read from the BIOS populated CFMWS structure. Per the CXL spec 3.1 Table 9-22 the target list is in interleave order. The CXL driver populates its decoder target list in the same order and stores it in 'struct cxl_switch_decoder' field "@target: active ordered target list in current decoder configuration" Given the promise of an ordered list, the driver can stop duplicating the work of BIOS and simply check target positions against the ordered list during region configuration. The simplified check against the ordered list is presented here. A follow-on patch will remove the unused code. For Modulo arithmetic this is not a fix, only a simplification. For XOR arithmetic this is a fix for HB IW of 3,6,12. Fixes: f9db85bfec0d ("cxl/acpi: Support CXL XOR Interleave Math (CXIMS)") Signed-off-by: Alison Schofield Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Link: https://patch.msgid.link/35d08d3aba08fee0f9b86ab1cef0c25116ca8a55.1719980933.git.alison.schofield@intel.com Signed-off-by: Dave Jiang Signed-off-by: Sasha Levin --- drivers/cxl/core/region.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index cd9ccdc6bc81..0e30e0a29d40 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1632,10 +1632,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr, const struct cxl_dport *dport, int pos) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; + struct cxl_decoder *cxld = &cxlsd->cxld; + int iw = cxld->interleave_ways; struct cxl_port *iter; int rc; - if (cxlrd->calc_hb(cxlrd, pos) != dport) { + if (dport != cxlrd->cxlsd.target[pos % iw]) { dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), dev_name(&cxlrd->cxlsd.cxld.dev)); From 6ffa0fb4f842b5eea1d9e4e5b71401d6556040cf Mon Sep 17 00:00:00 2001 From: "yang.zhang" Date: Wed, 8 May 2024 10:24:45 +0800 Subject: [PATCH 246/374] riscv: set trap vector earlier [ Upstream commit 6ad8735994b854b23c824dd6b1dd2126e893a3b4 ] The exception vector of the booting hart is not set before enabling the mmu and then still points to the value of the previous firmware, typically _start. That makes it hard to debug setup_vm() when bad things happen. So fix that by setting the exception vector earlier. 
Reviewed-by: Alexandre Ghiti Signed-off-by: yang.zhang Link: https://lore.kernel.org/r/20240508022445.6131-1-gaoshanliukou@163.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/kernel/head.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index a00f7523cb91..356d5397b2a2 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -305,6 +305,9 @@ SYM_CODE_START(_start_kernel) #else mv a0, a1 #endif /* CONFIG_BUILTIN_DTB */ + /* Set trap vector to spin forever to help debug */ + la a3, .Lsecondary_park + csrw CSR_TVEC, a3 call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir From 81c68e218ab883dfa368460a59b674084c0240da Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 May 2024 18:04:35 -0700 Subject: [PATCH 247/374] PCI: Add missing bridge lock to pci_bus_lock() [ Upstream commit a4e772898f8bf2e7e1cf661a12c60a5612c4afab ] One of the true positives that the cfg_access_lock lockdep effort identified is this sequence: WARNING: CPU: 14 PID: 1 at drivers/pci/pci.c:4886 pci_bridge_secondary_bus_reset+0x5d/0x70 RIP: 0010:pci_bridge_secondary_bus_reset+0x5d/0x70 Call Trace: ? __warn+0x8c/0x190 ? pci_bridge_secondary_bus_reset+0x5d/0x70 ? report_bug+0x1f8/0x200 ? handle_bug+0x3c/0x70 ? exc_invalid_op+0x18/0x70 ? asm_exc_invalid_op+0x1a/0x20 ? pci_bridge_secondary_bus_reset+0x5d/0x70 pci_reset_bus+0x1d8/0x270 vmd_probe+0x778/0xa10 pci_device_probe+0x95/0x120 Where pci_reset_bus() users are triggering unlocked secondary bus resets. Ironically pci_bus_reset(), several calls down from pci_reset_bus(), uses pci_bus_lock() before issuing the reset which locks everything *but* the bridge itself. For the same motivation as adding: bridge = pci_upstream_bridge(dev); if (bridge) pci_dev_lock(bridge); to pci_reset_function() for the "bus" and "cxl_bus" reset cases, add pci_dev_lock() for @bus->self to pci_bus_lock(). 
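The resulting lock ordering can be summarized by the sketch below (an illustration of the scheme, not a quote of the patched function):

    static void bus_lock_sketch(struct pci_bus *bus)
    {
            struct pci_dev *dev;

            pci_dev_lock(bus->self);        /* lock the bridge leading to this bus first */
            list_for_each_entry(dev, &bus->devices, bus_list) {
                    if (dev->subordinate)
                            bus_lock_sketch(dev->subordinate);  /* bridges are locked as bus->self above */
                    else
                            pci_dev_lock(dev);                  /* leaf devices are locked directly */
            }
    }

Locking each bridge exactly once, as the parent of its secondary bus, avoids taking the same device lock twice while ensuring the bridge that issues the secondary bus reset is covered.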
Link: https://lore.kernel.org/r/171711747501.1628941.15217746952476635316.stgit@dwillia2-xfh.jf.intel.com Reported-by: Imre Deak Closes: http://lore.kernel.org/r/6657833b3b5ae_14984b29437@dwillia2-xfh.jf.intel.com.notmuch Signed-off-by: Dan Williams Signed-off-by: Keith Busch [bhelgaas: squash in recursive locking deadlock fix from Keith Busch: https://lore.kernel.org/r/20240711193650.701834-1-kbusch@meta.com] Signed-off-by: Bjorn Helgaas Tested-by: Hans de Goede Tested-by: Kalle Valo Reviewed-by: Dave Jiang Signed-off-by: Sasha Levin --- drivers/pci/pci.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index dff09e4892d3..8db214d4b1d4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5441,10 +5441,12 @@ static void pci_bus_lock(struct pci_bus *bus) { struct pci_dev *dev; + pci_dev_lock(bus->self); list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + pci_dev_lock(dev); } } @@ -5456,8 +5458,10 @@ static void pci_bus_unlock(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); } /* Return 1 on successful lock, 0 on contention */ @@ -5465,15 +5469,15 @@ static int pci_bus_trylock(struct pci_bus *bus) { struct pci_dev *dev; + if (!pci_dev_trylock(bus->self)) + return 0; + list_for_each_entry(dev, &bus->devices, bus_list) { - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { - if (!pci_bus_trylock(dev->subordinate)) { - pci_dev_unlock(dev); + if (!pci_bus_trylock(dev->subordinate)) goto unlock; - } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -5481,8 +5485,10 @@ static int pci_bus_trylock(struct pci_bus *bus) list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); return 0; } @@ -5514,9 +5520,10 @@ static void pci_slot_lock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + pci_dev_lock(dev); } } @@ -5542,14 +5549,13 @@ static int pci_slot_trylock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -5560,7 +5566,8 @@ static int pci_slot_trylock(struct pci_slot *slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } return 0; } From 2d82277f6619df4ad9deb242dac4873c059eaa80 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 10 Jul 2024 10:12:45 -0700 Subject: [PATCH 248/374] tcp: Don't drop SYN+ACK for simultaneous connect(). [ Upstream commit 23e89e8ee7be73e21200947885a6d3a109a2c58d ] RFC 9293 states that in the case of simultaneous connect(), the connection gets established when SYN+ACK is received. [0] TCP Peer A TCP Peer B 1. CLOSED CLOSED 2. SYN-SENT --> ... 3. SYN-RECEIVED <-- <-- SYN-SENT 4. ... --> SYN-RECEIVED 5. SYN-RECEIVED --> ... 
6. ESTABLISHED <-- <-- SYN-RECEIVED 7. ... --> ESTABLISHED However, since commit 0c24604b68fc ("tcp: implement RFC 5961 4.2"), such a SYN+ACK is dropped in tcp_validate_incoming() and responded with Challenge ACK. For example, the write() syscall in the following packetdrill script fails with -EAGAIN, and wrong SNMP stats get incremented. 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3 +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation now in progress) +0 > S 0:0(0) +0 < S 0:0(0) win 1000 +0 > S. 0:0(0) ack 1 +0 < S. 0:0(0) ack 1 win 1000 +0 write(3, ..., 100) = 100 +0 > P. 1:101(100) ack 1 -- # packetdrill cross-synack.pkt cross-synack.pkt:13: runtime error in write call: Expected result 100 but got -1 with errno 11 (Resource temporarily unavailable) # nstat ... TcpExtTCPChallengeACK 1 0.0 TcpExtTCPSYNChallenge 1 0.0 The problem is that bpf_skops_established() is triggered by the Challenge ACK instead of SYN+ACK. This causes the bpf prog to miss the chance to check if the peer supports a TCP option that is expected to be exchanged in SYN and SYN+ACK. Let's accept a bare SYN+ACK for active-open TCP_SYN_RECV sockets to avoid such a situation. Note that tcp_ack_snd_check() in tcp_rcv_state_process() is skipped not to send an unnecessary ACK, but this could be a bit risky for net.git, so this targets for net-next. Link: https://www.rfc-editor.org/rfc/rfc9293.html#section-3.5-7 [0] Signed-off-by: Kuniyuki Iwashima Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20240710171246.87533-2-kuniyu@amazon.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_input.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2c52f6dcbd29..e0d870d3c9b8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6004,6 +6004,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { + if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack && + TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq && + TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && + TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) + goto pass; syn_challenge: if (syn_inerr) TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); @@ -6013,6 +6018,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, goto discard; } +pass: bpf_skops_parse_hdr(sk, skb); return true; @@ -6819,6 +6825,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_fast_path_on(tp); if (sk->sk_shutdown & SEND_SHUTDOWN) tcp_shutdown(sk, SEND_SHUTDOWN); + + if (sk->sk_socket) + goto consume; break; case TCP_FIN_WAIT1: { From 056e0cd381d59a9124b7c43dd715e15f56a11635 Mon Sep 17 00:00:00 2001 From: Neeraj Sanjay Kale Date: Wed, 15 May 2024 12:36:55 +0530 Subject: [PATCH 249/374] Bluetooth: btnxpuart: Fix Null pointer dereference in btnxpuart_flush() [ Upstream commit c68bbf5e334b35b36ac5b9f0419f1f93f796bad1 ] This adds a check before freeing the rx->skb in flush and close functions to handle the kernel crash seen while removing driver after FW download fails or before FW download completes. 
dmesg log: [ 54.634586] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000080 [ 54.643398] Mem abort info: [ 54.646204] ESR = 0x0000000096000004 [ 54.649964] EC = 0x25: DABT (current EL), IL = 32 bits [ 54.655286] SET = 0, FnV = 0 [ 54.658348] EA = 0, S1PTW = 0 [ 54.661498] FSC = 0x04: level 0 translation fault [ 54.666391] Data abort info: [ 54.669273] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 [ 54.674768] CM = 0, WnR = 0, TnD = 0, TagAccess = 0 [ 54.674771] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 [ 54.674775] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000048860000 [ 54.674780] [0000000000000080] pgd=0000000000000000, p4d=0000000000000000 [ 54.703880] Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP [ 54.710152] Modules linked in: btnxpuart(-) overlay fsl_jr_uio caam_jr caamkeyblob_desc caamhash_desc caamalg_desc crypto_engine authenc libdes crct10dif_ce polyval_ce polyval_generic snd_soc_imx_spdif snd_soc_imx_card snd_soc_ak5558 snd_soc_ak4458 caam secvio error snd_soc_fsl_micfil snd_soc_fsl_spdif snd_soc_fsl_sai snd_soc_fsl_utils imx_pcm_dma gpio_ir_recv rc_core sch_fq_codel fuse [ 54.744357] CPU: 3 PID: 72 Comm: kworker/u9:0 Not tainted 6.6.3-otbr-g128004619037 #2 [ 54.744364] Hardware name: FSL i.MX8MM EVK board (DT) [ 54.744368] Workqueue: hci0 hci_power_on [ 54.757244] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 54.757249] pc : kfree_skb_reason+0x18/0xb0 [ 54.772299] lr : btnxpuart_flush+0x40/0x58 [btnxpuart] [ 54.782921] sp : ffff8000805ebca0 [ 54.782923] x29: ffff8000805ebca0 x28: ffffa5c6cf1869c0 x27: ffffa5c6cf186000 [ 54.782931] x26: ffff377b84852400 x25: ffff377b848523c0 x24: ffff377b845e7230 [ 54.782938] x23: ffffa5c6ce8dbe08 x22: ffffa5c6ceb65410 x21: 00000000ffffff92 [ 54.782945] x20: ffffa5c6ce8dbe98 x19: ffffffffffffffac x18: ffffffffffffffff [ 54.807651] x17: 0000000000000000 x16: ffffa5c6ce2824ec x15: ffff8001005eb857 [ 54.821917] x14: 0000000000000000 x13: ffffa5c6cf1a02e0 x12: 0000000000000642 [ 54.821924] x11: 0000000000000040 x10: ffffa5c6cf19d690 x9 : ffffa5c6cf19d688 [ 54.821931] x8 : ffff377b86000028 x7 : 0000000000000000 x6 : 0000000000000000 [ 54.821938] x5 : ffff377b86000000 x4 : 0000000000000000 x3 : 0000000000000000 [ 54.843331] x2 : 0000000000000000 x1 : 0000000000000002 x0 : ffffffffffffffac [ 54.857599] Call trace: [ 54.857601] kfree_skb_reason+0x18/0xb0 [ 54.863878] btnxpuart_flush+0x40/0x58 [btnxpuart] [ 54.863888] hci_dev_open_sync+0x3a8/0xa04 [ 54.872773] hci_power_on+0x54/0x2e4 [ 54.881832] process_one_work+0x138/0x260 [ 54.881842] worker_thread+0x32c/0x438 [ 54.881847] kthread+0x118/0x11c [ 54.881853] ret_from_fork+0x10/0x20 [ 54.896406] Code: a9be7bfd 910003fd f9000bf3 aa0003f3 (b940d400) [ 54.896410] ---[ end trace 0000000000000000 ]--- Signed-off-by: Neeraj Sanjay Kale Tested-by: Guillaume Legoupil Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Sasha Levin --- drivers/bluetooth/btnxpuart.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index eeba2d26d1cb..5890ecd8e948 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1326,8 +1326,10 @@ static int btnxpuart_close(struct hci_dev *hdev) serdev_device_close(nxpdev->serdev); skb_queue_purge(&nxpdev->txq); - kfree_skb(nxpdev->rx_skb); - nxpdev->rx_skb = NULL; + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + } clear_bit(BTNXPUART_SERDEV_OPEN, 
&nxpdev->tx_state); return 0; } @@ -1342,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev) cancel_work_sync(&nxpdev->tx_work); - kfree_skb(nxpdev->rx_skb); - nxpdev->rx_skb = NULL; + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + } return 0; } From 42e6607d33c7df63737d3a83d6a93affc80d0d5d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 14 Jul 2024 01:53:32 +0300 Subject: [PATCH 250/374] net: dpaa: avoid on-stack arrays of NR_CPUS elements [ Upstream commit 555a05d84ca2c587e2d4777006e2c2fb3dfbd91d ] The dpaa-eth driver is written for PowerPC and Arm SoCs which have 1-24 CPUs. It depends on CONFIG_NR_CPUS having a reasonably small value in Kconfig. Otherwise, there are 2 functions which allocate on-stack arrays of NR_CPUS elements, and these can quickly explode in size, leading to warnings such as: drivers/net/ethernet/freescale/dpaa/dpaa_eth.c:3280:12: warning: stack frame size (16664) exceeds limit (2048) in 'dpaa_eth_probe' [-Wframe-larger-than] The problem is twofold: - Reducing the array size to the boot-time num_possible_cpus() (rather than the compile-time NR_CPUS) creates a variable-length array, which should be avoided in the Linux kernel. - Using NR_CPUS as an array size makes the driver blow up in stack consumption with generic, as opposed to hand-crafted, .config files. A simple solution is to use dynamic allocation for num_possible_cpus() elements (aka a small number determined at runtime). Link: https://lore.kernel.org/all/202406261920.l5pzM1rj-lkp@intel.com/ Signed-off-by: Vladimir Oltean Reviewed-by: Breno Leitao Acked-by: Madalin Bucur Link: https://patch.msgid.link/20240713225336.1746343-2-vladimir.oltean@nxp.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- .../net/ethernet/freescale/dpaa/dpaa_eth.c | 20 ++++++++++++++----- .../ethernet/freescale/dpaa/dpaa_ethtool.c | 10 +++++++++- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index baa0b3c2ce6f..946c3d3b69d9 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -931,14 +931,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv, } } -static void dpaa_fq_setup(struct dpaa_priv *priv, - const struct dpaa_fq_cbs *fq_cbs, - struct fman_port *tx_port) +static int dpaa_fq_setup(struct dpaa_priv *priv, + const struct dpaa_fq_cbs *fq_cbs, + struct fman_port *tx_port) { int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; const cpumask_t *affine_cpus = qman_affine_cpus(); - u16 channels[NR_CPUS]; struct dpaa_fq *fq; + u16 *channels; + + channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL); + if (!channels) + return -ENOMEM; for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) channels[num_portals++] = qman_affine_channel(cpu); @@ -997,6 +1001,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, break; } } + + kfree(channels); + + return 0; } static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, @@ -3416,7 +3424,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) */ dpaa_eth_add_channel(priv->channel, &pdev->dev); - dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + if (err) + goto free_dpaa_bps; /* Create a congestion group for this netdev, with * dynamically-allocated CGR ID. 
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 5bd0b36d1feb..3f8cd4a7d845 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev, struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); - bool needs_revert[NR_CPUS] = {false}; struct qman_portal *portal; u32 period, prev_period; u8 thresh, prev_thresh; + bool *needs_revert; int cpu, res; + needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL); + if (!needs_revert) + return -ENOMEM; + period = c->rx_coalesce_usecs; thresh = c->rx_max_coalesced_frames; @@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev, needs_revert[cpu] = true; } + kfree(needs_revert); + return 0; revert_values: @@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev, qman_dqrr_set_ithresh(portal, prev_thresh); } + kfree(needs_revert); + return res; } From 52d9d4f33b85a7fdc3e2d1d05a5b1c1032ab7cfb Mon Sep 17 00:00:00 2001 From: YiPeng Chai Date: Fri, 12 Apr 2024 13:46:03 +0800 Subject: [PATCH 251/374] drm/amdgpu: add mutex to protect ras shared memory [ Upstream commit b3fb79cda5688a44a423c27b791f5456d801e49c ] Add mutex to protect ras shared memory. v2: Add TA_RAS_COMMAND__TRIGGER_ERROR command call status check. Signed-off-by: YiPeng Chai Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 123 ++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2 + 3 files changed, 86 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index b3df27ce7663..ee19af2d20fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1584,6 +1584,68 @@ static void psp_ras_ta_check_status(struct psp_context *psp) } } +static int psp_ras_send_cmd(struct psp_context *psp, + enum ras_command cmd_id, void *in, void *out) +{ + struct ta_ras_shared_memory *ras_cmd; + uint32_t cmd = cmd_id; + int ret = 0; + + if (!in) + return -EINVAL; + + mutex_lock(&psp->ras_context.mutex); + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; + memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); + + switch (cmd) { + case TA_RAS_COMMAND__ENABLE_FEATURES: + case TA_RAS_COMMAND__DISABLE_FEATURES: + memcpy(&ras_cmd->ras_in_message, + in, sizeof(ras_cmd->ras_in_message)); + break; + case TA_RAS_COMMAND__TRIGGER_ERROR: + memcpy(&ras_cmd->ras_in_message.trigger_error, + in, sizeof(ras_cmd->ras_in_message.trigger_error)); + break; + case TA_RAS_COMMAND__QUERY_ADDRESS: + memcpy(&ras_cmd->ras_in_message.address, + in, sizeof(ras_cmd->ras_in_message.address)); + break; + default: + dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd); + ret = -EINVAL; + goto err_out; + } + + ras_cmd->cmd_id = cmd; + ret = psp_ras_invoke(psp, ras_cmd->cmd_id); + + switch (cmd) { + case TA_RAS_COMMAND__TRIGGER_ERROR: + if (ret || psp->cmd_buf_mem->resp.status) + ret = -EINVAL; + else if (out) + memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status)); + break; + case TA_RAS_COMMAND__QUERY_ADDRESS: + if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) + ret = -EINVAL; + else if (out) + memcpy(out, + &ras_cmd->ras_out_message.address, + 
sizeof(ras_cmd->ras_out_message.address)); + break; + default: + break; + } + +err_out: + mutex_unlock(&psp->ras_context.mutex); + + return ret; +} + int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { struct ta_ras_shared_memory *ras_cmd; @@ -1625,23 +1687,15 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable) { - struct ta_ras_shared_memory *ras_cmd; + enum ras_command cmd_id; int ret; - if (!psp->ras_context.context.initialized) + if (!psp->ras_context.context.initialized || !info) return -EINVAL; - ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); - - if (enable) - ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; - else - ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; - - ras_cmd->ras_in_message = *info; - - ret = psp_ras_invoke(psp, ras_cmd->cmd_id); + cmd_id = enable ? + TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; + ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); if (ret) return -EINVAL; @@ -1665,6 +1719,8 @@ int psp_ras_terminate(struct psp_context *psp) psp->ras_context.context.initialized = false; + mutex_destroy(&psp->ras_context.mutex); + return ret; } @@ -1749,9 +1805,10 @@ int psp_ras_initialize(struct psp_context *psp) ret = psp_ta_load(psp, &psp->ras_context.context); - if (!ret && !ras_cmd->ras_status) + if (!ret && !ras_cmd->ras_status) { psp->ras_context.context.initialized = true; - else { + mutex_init(&psp->ras_context.mutex); + } else { if (ras_cmd->ras_status) dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); @@ -1765,12 +1822,12 @@ int psp_ras_initialize(struct psp_context *psp) int psp_ras_trigger_error(struct psp_context *psp, struct ta_ras_trigger_error_input *info, uint32_t instance_mask) { - struct ta_ras_shared_memory *ras_cmd; struct amdgpu_device *adev = psp->adev; int ret; uint32_t dev_mask; + uint32_t ras_status = 0; - if (!psp->ras_context.context.initialized) + if (!psp->ras_context.context.initialized || !info) return -EINVAL; switch (info->block_id) { @@ -1794,13 +1851,8 @@ int psp_ras_trigger_error(struct psp_context *psp, dev_mask &= AMDGPU_RAS_INST_MASK; info->sub_block_index |= dev_mask; - ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); - - ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; - ras_cmd->ras_in_message.trigger_error = *info; - - ret = psp_ras_invoke(psp, ras_cmd->cmd_id); + ret = psp_ras_send_cmd(psp, + TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); if (ret) return -EINVAL; @@ -1810,9 +1862,9 @@ int psp_ras_trigger_error(struct psp_context *psp, if (amdgpu_ras_intr_triggered()) return 0; - if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) + if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) return -EACCES; - else if (ras_cmd->ras_status) + else if (ras_status) return -EINVAL; return 0; @@ -1822,25 +1874,16 @@ int psp_ras_query_address(struct psp_context *psp, struct ta_ras_query_address_input *addr_in, struct ta_ras_query_address_output *addr_out) { - struct ta_ras_shared_memory *ras_cmd; int ret; - if (!psp->ras_context.context.initialized) - return -EINVAL; - - ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); - - ras_cmd->cmd_id = 
TA_RAS_COMMAND__QUERY_ADDRESS; - ras_cmd->ras_in_message.address = *addr_in; - - ret = psp_ras_invoke(psp, ras_cmd->cmd_id); - if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) + if (!psp->ras_context.context.initialized || + !addr_in || !addr_out) return -EINVAL; - *addr_out = ras_cmd->ras_out_message.address; + ret = psp_ras_send_cmd(psp, + TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); - return 0; + return ret; } // ras end diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 3635303e6548..74a96516c913 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -200,6 +200,7 @@ struct psp_xgmi_context { struct psp_ras_context { struct ta_context context; struct amdgpu_ras *ras; + struct mutex mutex; }; #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 9aff579c6abf..38face981c3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -351,6 +351,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size context->session_id = ta_id; + mutex_lock(&psp->ras_context.mutex); ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len); if (ret) goto err_free_shared_buf; @@ -369,6 +370,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size ret = -EFAULT; err_free_shared_buf: + mutex_unlock(&psp->ras_context.mutex); kfree(shared_buf); return ret; From 4794579335cac28a7acd7f66ab2d853a09d0885e Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 20 Jul 2024 22:41:07 +0800 Subject: [PATCH 252/374] LoongArch: Use correct API to map cmdline in relocate_kernel() [ Upstream commit 0124fbb4c6dba23dbdf80c829be68adbccde2722 ] fw_arg1 is in memory space rather than I/O space, so we should use early_memremap_ro() instead of early_ioremap() to map the cmdline. Moreover, we should unmap it after using. 
Suggested-by: Jiaxun Yang Reviewed-by: Jiaxun Yang Signed-off-by: Huacai Chen Signed-off-by: Sasha Levin --- arch/loongarch/kernel/relocate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 1acfa704c8d0..0eddd4a66b87 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void) unsigned long kernel_length; unsigned long random_offset = 0; void *location_new = _text; /* Default to original kernel start */ - char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ + char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); @@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void) random_offset = (unsigned long)location_new - (unsigned long)(_text); #endif reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS; + early_memunmap(cmdline, COMMAND_LINE_SIZE); if (random_offset) { kernel_length = (long)(_end) - (long)(_text); From 66a490d7c5dd7935038147e2aa0913feeb5aeab3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jul 2024 12:40:24 +0200 Subject: [PATCH 253/374] regmap: maple: work around gcc-14.1 false-positive warning [ Upstream commit 542440fd7b30983cae23e32bd22f69a076ec7ef4 ] With gcc-14.1, there is a false-postive -Wuninitialized warning in regcache_maple_drop: drivers/base/regmap/regcache-maple.c: In function 'regcache_maple_drop': drivers/base/regmap/regcache-maple.c:113:23: error: 'lower_index' is used uninitialized [-Werror=uninitialized] 113 | unsigned long lower_index, lower_last; | ^~~~~~~~~~~ drivers/base/regmap/regcache-maple.c:113:36: error: 'lower_last' is used uninitialized [-Werror=uninitialized] 113 | unsigned long lower_index, lower_last; | ^~~~~~~~~~ I've created a reduced test case to see if this needs to be reported as a gcc, but it appears that the gcc-14.x branch already has a change that turns this into a more sensible -Wmaybe-uninitialized warning, so I ended up not reporting it so far. The reduced test case also produces a warning for gcc-13 and gcc-12 but I don't see that with the version in the kernel. 
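For illustration only (this is not the reduced test case linked just below, only an analogous user-space pattern): the warning fires when a variable is written and later read only on paths guarded by a condition the compiler cannot connect.

    #include <stdbool.h>

    /*
     * Analogous pattern, not the regcache code itself: 'idx' is written
     * only when 'found' is set and read only when 'found' is set, so the
     * function is correct, but a compiler that cannot tie the two checks
     * together may warn that 'idx' is used uninitialized.  Initializing
     * it up front -- as the patch does for lower_index/lower_last --
     * silences the warning without changing behaviour.
     */
    static int first_positive(const int *arr, int n, int *out)
    {
        int idx;                /* "int idx = 0;" avoids the warning */
        bool found = false;
        int i;

        for (i = 0; i < n; i++) {
            if (arr[i] > 0) {
                idx = i;
                found = true;
                break;
            }
        }
        if (found)
            *out = idx;
        return found ? 0 : -1;
    }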
Link: https://godbolt.org/z/oKbohKqd3 Link: https://lore.kernel.org/all/CAMuHMdWj=FLmkazPbYKPevDrcym2_HDb_U7Mb9YE9ovrP0jJfA@mail.gmail.com/ Signed-off-by: Arnd Bergmann Link: https://patch.msgid.link/20240719104030.1382465-1-arnd@kernel.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/base/regmap/regcache-maple.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index e42433404854..4c034c813126 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min, struct maple_tree *mt = map->cache; MA_STATE(mas, mt, min, max); unsigned long *entry, *lower, *upper; - unsigned long lower_index, lower_last; + /* initialized to work around false-positive -Wuninitialized warning */ + unsigned long lower_index = 0, lower_last = 0; unsigned long upper_index, upper_last; int ret = 0; From cae59dffe65228da63fd8215d9b1c5747a3f6bbb Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Wed, 17 Jul 2024 21:43:22 +0200 Subject: [PATCH 254/374] s390/boot: Do not assume the decompressor range is reserved [ Upstream commit b798b685b42c9dbe508e59a74250d97c41bec35e ] When allocating a random memory range for .amode31 sections the minimal randomization address is 0. That does not lead to a possible overlap with the decompressor image (which also starts from 0) since by that time the image range is already reserved. Do not assume the decompressor range is reserved and always provide the minimal randomization address for .amode31 sections beyond the decompressor. That is a prerequisite for moving the lowcore memory address from NULL elsewhere. Signed-off-by: Alexander Gordeev Signed-off-by: Sven Schnelle Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/boot/startup.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 6d88f241dd43..66ee97ac803d 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -476,8 +476,12 @@ void startup_kernel(void) * before the kernel started. Therefore, in case the two sections * overlap there is no risk of corrupting any data. */ - if (kaslr_enabled()) - amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G); + if (kaslr_enabled()) { + unsigned long amode31_min; + + amode31_min = (unsigned long)_decompressor_end; + amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G); + } if (!amode31_lma) amode31_lma = text_lma - vmlinux.amode31_size; physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size); From cec226f9b1fd6cf55bc157873aec61b523083e96 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 19 Jul 2024 15:19:02 +0100 Subject: [PATCH 255/374] cachefiles: Set the max subreq size for cache writes to MAX_RW_COUNT [ Upstream commit 51d37982bbac3ea0ca21b2797a9cb0044272b3aa ] Set the maximum size of a subrequest that writes to cachefiles to be MAX_RW_COUNT so that we don't overrun the maximum write we can make to the backing filesystem. 
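For context (not part of the patch): MAX_RW_COUNT is the VFS cap on a single read or write, defined in include/linux/fs.h essentially as shown below, so clamping the subrequest to it keeps each cache write within the largest write the kernel will issue to the backing filesystem.

    /* Shown for context, from include/linux/fs.h: the largest single
     * read/write the VFS will perform -- INT_MAX rounded down to a
     * whole number of pages. */
    #define MAX_RW_COUNT (INT_MAX & PAGE_MASK)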
Signed-off-by: David Howells Link: https://lore.kernel.org/r/1599005.1721398742@warthog.procyon.org.uk cc: Jeff Layton cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/cachefiles/io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index e667dbcd20e8..a91acd03ee12 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -630,7 +630,7 @@ static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq) _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start); - subreq->max_len = ULONG_MAX; + subreq->max_len = MAX_RW_COUNT; subreq->max_nr_segs = BIO_MAX_VECS; if (!cachefiles_cres_file(cres)) { From 8d5863cb33aa424fc27115ee945ad6b96ae2facb Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 23 Jul 2024 09:59:54 +0100 Subject: [PATCH 256/374] vfs: Fix potential circular locking through setxattr() and removexattr() [ Upstream commit c3a5e3e872f3688ae0dc57bb78ca633921d96a91 ] When using cachefiles, lockdep may emit something similar to the circular locking dependency notice below. The problem appears to stem from the following: (1) Cachefiles manipulates xattrs on the files in its cache when called from ->writepages(). (2) The setxattr() and removexattr() system call handlers get the name (and value) from userspace after taking the sb_writers lock, putting accesses of the vma->vm_lock and mm->mmap_lock inside of that. (3) The afs filesystem uses a per-inode lock to prevent multiple revalidation RPCs and in writeback vs truncate to prevent parallel operations from deadlocking against the server on one side and local page locks on the other. Fix this by moving the getting of the name and value in {get,remove}xattr() outside of the sb_writers lock. This also has the minor benefits that we don't need to reget these in the event of a retry and we never try to take the sb_writers lock in the event we can't pull the name and value into the kernel. Alternative approaches that might fix this include moving the dispatch of a write to the cache off to a workqueue or trying to do without the validation lock in afs. Note that this might also affect other filesystems that use netfslib and/or cachefiles. ====================================================== WARNING: possible circular locking dependency detected 6.10.0-build2+ #956 Not tainted ------------------------------------------------------ fsstress/6050 is trying to acquire lock: ffff888138fd82f0 (mapping.invalidate_lock#3){++++}-{3:3}, at: filemap_fault+0x26e/0x8b0 but task is already holding lock: ffff888113f26d18 (&vma->vm_lock->lock){++++}-{3:3}, at: lock_vma_under_rcu+0x165/0x250 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #4 (&vma->vm_lock->lock){++++}-{3:3}: __lock_acquire+0xaf0/0xd80 lock_acquire.part.0+0x103/0x280 down_write+0x3b/0x50 vma_start_write+0x6b/0xa0 vma_link+0xcc/0x140 insert_vm_struct+0xb7/0xf0 alloc_bprm+0x2c1/0x390 kernel_execve+0x65/0x1a0 call_usermodehelper_exec_async+0x14d/0x190 ret_from_fork+0x24/0x40 ret_from_fork_asm+0x1a/0x30 -> #3 (&mm->mmap_lock){++++}-{3:3}: __lock_acquire+0xaf0/0xd80 lock_acquire.part.0+0x103/0x280 __might_fault+0x7c/0xb0 strncpy_from_user+0x25/0x160 removexattr+0x7f/0x100 __do_sys_fremovexattr+0x7e/0xb0 do_syscall_64+0x9f/0x100 entry_SYSCALL_64_after_hwframe+0x76/0x7e -> #2 (sb_writers#14){.+.+}-{0:0}: __lock_acquire+0xaf0/0xd80 lock_acquire.part.0+0x103/0x280 percpu_down_read+0x3c/0x90 vfs_iocb_iter_write+0xe9/0x1d0 __cachefiles_write+0x367/0x430 cachefiles_issue_write+0x299/0x2f0 netfs_advance_write+0x117/0x140 netfs_write_folio.isra.0+0x5ca/0x6e0 netfs_writepages+0x230/0x2f0 afs_writepages+0x4d/0x70 do_writepages+0x1e8/0x3e0 filemap_fdatawrite_wbc+0x84/0xa0 __filemap_fdatawrite_range+0xa8/0xf0 file_write_and_wait_range+0x59/0x90 afs_release+0x10f/0x270 __fput+0x25f/0x3d0 __do_sys_close+0x43/0x70 do_syscall_64+0x9f/0x100 entry_SYSCALL_64_after_hwframe+0x76/0x7e -> #1 (&vnode->validate_lock){++++}-{3:3}: __lock_acquire+0xaf0/0xd80 lock_acquire.part.0+0x103/0x280 down_read+0x95/0x200 afs_writepages+0x37/0x70 do_writepages+0x1e8/0x3e0 filemap_fdatawrite_wbc+0x84/0xa0 filemap_invalidate_inode+0x167/0x1e0 netfs_unbuffered_write_iter+0x1bd/0x2d0 vfs_write+0x22e/0x320 ksys_write+0xbc/0x130 do_syscall_64+0x9f/0x100 entry_SYSCALL_64_after_hwframe+0x76/0x7e -> #0 (mapping.invalidate_lock#3){++++}-{3:3}: check_noncircular+0x119/0x160 check_prev_add+0x195/0x430 __lock_acquire+0xaf0/0xd80 lock_acquire.part.0+0x103/0x280 down_read+0x95/0x200 filemap_fault+0x26e/0x8b0 __do_fault+0x57/0xd0 do_pte_missing+0x23b/0x320 __handle_mm_fault+0x2d4/0x320 handle_mm_fault+0x14f/0x260 do_user_addr_fault+0x2a2/0x500 exc_page_fault+0x71/0x90 asm_exc_page_fault+0x22/0x30 other info that might help us debug this: Chain exists of: mapping.invalidate_lock#3 --> &mm->mmap_lock --> &vma->vm_lock->lock Possible unsafe locking scenario: CPU0 CPU1 ---- ---- rlock(&vma->vm_lock->lock); lock(&mm->mmap_lock); lock(&vma->vm_lock->lock); rlock(mapping.invalidate_lock#3); *** DEADLOCK *** 1 lock held by fsstress/6050: #0: ffff888113f26d18 (&vma->vm_lock->lock){++++}-{3:3}, at: lock_vma_under_rcu+0x165/0x250 stack backtrace: CPU: 0 PID: 6050 Comm: fsstress Not tainted 6.10.0-build2+ #956 Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014 Call Trace: dump_stack_lvl+0x57/0x80 check_noncircular+0x119/0x160 ? queued_spin_lock_slowpath+0x4be/0x510 ? __pfx_check_noncircular+0x10/0x10 ? __pfx_queued_spin_lock_slowpath+0x10/0x10 ? mark_lock+0x47/0x160 ? init_chain_block+0x9c/0xc0 ? add_chain_block+0x84/0xf0 check_prev_add+0x195/0x430 __lock_acquire+0xaf0/0xd80 ? __pfx___lock_acquire+0x10/0x10 ? __lock_release.isra.0+0x13b/0x230 lock_acquire.part.0+0x103/0x280 ? filemap_fault+0x26e/0x8b0 ? __pfx_lock_acquire.part.0+0x10/0x10 ? rcu_is_watching+0x34/0x60 ? lock_acquire+0xd7/0x120 down_read+0x95/0x200 ? filemap_fault+0x26e/0x8b0 ? __pfx_down_read+0x10/0x10 ? __filemap_get_folio+0x25/0x1a0 filemap_fault+0x26e/0x8b0 ? __pfx_filemap_fault+0x10/0x10 ? find_held_lock+0x7c/0x90 ? __pfx___lock_release.isra.0+0x10/0x10 ? __pte_offset_map+0x99/0x110 __do_fault+0x57/0xd0 do_pte_missing+0x23b/0x320 __handle_mm_fault+0x2d4/0x320 ? 
__pfx___handle_mm_fault+0x10/0x10 handle_mm_fault+0x14f/0x260 do_user_addr_fault+0x2a2/0x500 exc_page_fault+0x71/0x90 asm_exc_page_fault+0x22/0x30 Signed-off-by: David Howells Link: https://lore.kernel.org/r/2136178.1721725194@warthog.procyon.org.uk cc: Alexander Viro cc: Christian Brauner cc: Jan Kara cc: Jeff Layton cc: Gao Xiang cc: Matthew Wilcox cc: netfs@lists.linux.dev cc: linux-erofs@lists.ozlabs.org cc: linux-fsdevel@vger.kernel.org [brauner: fix minor issues] Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/xattr.c | 91 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 48 insertions(+), 43 deletions(-) diff --git a/fs/xattr.c b/fs/xattr.c index f8b643f91a98..7672ce5486c5 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -630,10 +630,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, ctx->kvalue, ctx->size, ctx->flags); } -static long -setxattr(struct mnt_idmap *idmap, struct dentry *d, - const char __user *name, const void __user *value, size_t size, - int flags) +static int path_setxattr(const char __user *pathname, + const char __user *name, const void __user *value, + size_t size, int flags, unsigned int lookup_flags) { struct xattr_name kname; struct xattr_ctx ctx = { @@ -643,33 +642,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d, .kname = &kname, .flags = flags, }; + struct path path; int error; error = setxattr_copy(name, &ctx); if (error) return error; - error = do_setxattr(idmap, d, &ctx); - - kvfree(ctx.kvalue); - return error; -} - -static int path_setxattr(const char __user *pathname, - const char __user *name, const void __user *value, - size_t size, int flags, unsigned int lookup_flags) -{ - struct path path; - int error; - retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (error) - return error; + goto out; error = mnt_want_write(path.mnt); if (!error) { - error = setxattr(mnt_idmap(path.mnt), path.dentry, name, - value, size, flags); + error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx); mnt_drop_write(path.mnt); } path_put(&path); @@ -677,6 +663,9 @@ static int path_setxattr(const char __user *pathname, lookup_flags |= LOOKUP_REVAL; goto retry; } + +out: + kvfree(ctx.kvalue); return error; } @@ -697,20 +686,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, const void __user *,value, size_t, size, int, flags) { - struct fd f = fdget(fd); - int error = -EBADF; + struct xattr_name kname; + struct xattr_ctx ctx = { + .cvalue = value, + .kvalue = NULL, + .size = size, + .kname = &kname, + .flags = flags, + }; + int error; + CLASS(fd, f)(fd); if (!f.file) - return error; + return -EBADF; + audit_file(f.file); + error = setxattr_copy(name, &ctx); + if (error) + return error; + error = mnt_want_write_file(f.file); if (!error) { - error = setxattr(file_mnt_idmap(f.file), - f.file->f_path.dentry, name, - value, size, flags); + error = do_setxattr(file_mnt_idmap(f.file), + f.file->f_path.dentry, &ctx); mnt_drop_write_file(f.file); } - fdput(f); + kvfree(ctx.kvalue); return error; } @@ -899,9 +900,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) * Extended attribute REMOVE operations */ static long -removexattr(struct mnt_idmap *idmap, struct dentry *d, - const char __user *name) +removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name) { + if (is_posix_acl_xattr(name)) + return vfs_remove_acl(idmap, d, name); + return vfs_removexattr(idmap, d, name); +} + +static 
int path_removexattr(const char __user *pathname, + const char __user *name, unsigned int lookup_flags) +{ + struct path path; int error; char kname[XATTR_NAME_MAX + 1]; @@ -910,25 +919,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d, error = -ERANGE; if (error < 0) return error; - - if (is_posix_acl_xattr(kname)) - return vfs_remove_acl(idmap, d, kname); - - return vfs_removexattr(idmap, d, kname); -} - -static int path_removexattr(const char __user *pathname, - const char __user *name, unsigned int lookup_flags) -{ - struct path path; - int error; retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (error) return error; error = mnt_want_write(path.mnt); if (!error) { - error = removexattr(mnt_idmap(path.mnt), path.dentry, name); + error = removexattr(mnt_idmap(path.mnt), path.dentry, kname); mnt_drop_write(path.mnt); } path_put(&path); @@ -954,15 +951,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname, SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) { struct fd f = fdget(fd); + char kname[XATTR_NAME_MAX + 1]; int error = -EBADF; if (!f.file) return error; audit_file(f.file); + + error = strncpy_from_user(kname, name, sizeof(kname)); + if (error == 0 || error == sizeof(kname)) + error = -ERANGE; + if (error < 0) + return error; + error = mnt_want_write_file(f.file); if (!error) { error = removexattr(file_mnt_idmap(f.file), - f.file->f_path.dentry, name); + f.file->f_path.dentry, kname); mnt_drop_write_file(f.file); } fdput(f); From f62bf0cc60758c1b53c8997d4059c5ee92816995 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Mon, 3 Jun 2024 11:15:27 -0400 Subject: [PATCH 257/374] i3c: master: svc: resend target address when get NACK [ Upstream commit 9bc7501b0b90f4d0c34b97c14ff1f708ce7ad8f3 ] According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3: If the Controller chooses to start an I3C Message with an I3C Dynamic Address, then special provisions shall be made because that same I3C Target may be initiating an IBI or a Controller Role Request. So, one of three things may happen: (skip 1, 2) 3. The Addresses match and the RnW bits also match, and so neither Controller nor Target will ACK since both are expecting the other side to provide ACK. As a result, each side might think it had "won" arbitration, but neither side would continue, as each would subsequently see that the other did not provide ACK. ... For either value of RnW: Due to the NACK, the Controller shall defer the Private Write or Private Read, and should typically transmit the Target ^^^^^^^^^^^^^^^^^^^ Address again after a Repeated START (i.e., the next one or any one prior ^^^^^^^^^^^^^ to a STOP in the Frame). Since the Address Header following a Repeated START is not arbitrated, the Controller will always win (see Section 5.1.2.2.4). Resend target address again if address is not 7E and controller get NACK. 
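For illustration only (the real implementation is the diff just below): 0x7E is the I3C broadcast address, and the deferral-and-resend provision quoted above applies to messages started with a target's dynamic address, which is why the resend is skipped for 0x7E. A compilable sketch of the retry policy, with hypothetical helper names standing in for the SVC register accesses:

    #include <errno.h>
    #include <stdbool.h>

    #define I3C_BROADCAST_ADDR	0x7e	/* 7'h7E per the MIPI I3C spec */

    /* Hypothetical stand-ins for the MCTRL/MSTATUS/MERRWARN accesses. */
    static void emit_start(unsigned int addr) { (void)addr; }
    static bool transfer_nacked(void) { return false; }
    static void clear_nack_flag(void) { }

    /* Sketch of the policy only: retry a NACKed dynamic address once
     * after a repeated START, but never retry the broadcast address. */
    static int xfer_with_retry(unsigned int addr)
    {
        int retry = 2;

        while (retry--) {
            emit_start(addr);
            if (!transfer_nacked())
                return 0;
            if (!retry || addr == I3C_BROADCAST_ADDR)
                return -ENXIO;		/* give up and emit STOP */
            clear_nack_flag();		/* then resend the address */
        }
        return -ENXIO;
    }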
Reviewed-by: Miquel Raynal Signed-off-by: Frank Li Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin --- drivers/i3c/master/svc-i3c-master.c | 58 ++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index bb299ce02ccc..f0362509319e 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -1052,29 +1052,59 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, u8 *in, const u8 *out, unsigned int xfer_len, unsigned int *actual_len, bool continued) { + int retry = 2; u32 reg; int ret; /* clean SVC_I3C_MINT_IBIWON w1c bits */ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); - writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | - xfer_type | - SVC_I3C_MCTRL_IBIRESP_NACK | - SVC_I3C_MCTRL_DIR(rnw) | - SVC_I3C_MCTRL_ADDR(addr) | - SVC_I3C_MCTRL_RDTERM(*actual_len), - master->regs + SVC_I3C_MCTRL); - ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, + while (retry--) { + writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | + xfer_type | + SVC_I3C_MCTRL_IBIRESP_NACK | + SVC_I3C_MCTRL_DIR(rnw) | + SVC_I3C_MCTRL_ADDR(addr) | + SVC_I3C_MCTRL_RDTERM(*actual_len), + master->regs + SVC_I3C_MCTRL); + + ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000); - if (ret) - goto emit_stop; + if (ret) + goto emit_stop; - if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { - ret = -ENXIO; - *actual_len = 0; - goto emit_stop; + if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { + /* + * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3. + * If the Controller chooses to start an I3C Message with an I3C Dynamic + * Address, then special provisions shall be made because that same I3C + * Target may be initiating an IBI or a Controller Role Request. So, one of + * three things may happen: (skip 1, 2) + * + * 3. The Addresses match and the RnW bits also match, and so neither + * Controller nor Target will ACK since both are expecting the other side to + * provide ACK. As a result, each side might think it had "won" arbitration, + * but neither side would continue, as each would subsequently see that the + * other did not provide ACK. + * ... + * For either value of RnW: Due to the NACK, the Controller shall defer the + * Private Write or Private Read, and should typically transmit the Target + * Address again after a Repeated START (i.e., the next one or any one prior + * to a STOP in the Frame). Since the Address Header following a Repeated + * START is not arbitrated, the Controller will always win (see Section + * 5.1.2.2.4). + */ + if (retry && addr != 0x7e) { + writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN); + } else { + ret = -ENXIO; + *actual_len = 0; + goto emit_stop; + } + } else { + break; + } } /* From e2d14bfda9eb5393f8a17008afe2aa7fe0a29815 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Fri, 28 Jun 2024 16:15:58 +0300 Subject: [PATCH 258/374] i3c: mipi-i3c-hci: Error out instead on BUG_ON() in IBI DMA setup [ Upstream commit 8a2be2f1db268ec735419e53ef04ca039fc027dc ] Definitely condition dma_get_cache_alignment * defined value > 256 during driver initialization is not reason to BUG_ON(). Turn that to graceful error out with -EINVAL. 
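For illustration only (the diff below is the actual change): the general rule being applied is that a condition derived from platform configuration and reachable at init time should fail gracefully rather than crash the kernel.

    /* Sketch of the pattern, not the driver code: return an error the
     * caller can propagate instead of BUG_ON() on a config-derived
     * value discovered during initialization. */
    static int ring_init(unsigned int ibi_chunk_sz)
    {
        if (ibi_chunk_sz > 256)	/* was: BUG_ON(ibi_chunk_sz > 256) */
            return -EINVAL;
        /* ... size and allocate the IBI rings from ibi_chunk_sz ... */
        return 0;
    }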
Signed-off-by: Jarkko Nikula Link: https://lore.kernel.org/r/20240628131559.502822-3-jarkko.nikula@linux.intel.com Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin --- drivers/i3c/master/mipi-i3c-hci/dma.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c index 4e01a95cc4d0..1a96bf5a0bf8 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dma.c +++ b/drivers/i3c/master/mipi-i3c-hci/dma.c @@ -294,7 +294,10 @@ static int hci_dma_init(struct i3c_hci *hci) rh->ibi_chunk_sz = dma_get_cache_alignment(); rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES; - BUG_ON(rh->ibi_chunk_sz > 256); + if (rh->ibi_chunk_sz > 256) { + ret = -EINVAL; + goto err_out; + } ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries; ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total; From f743662ab629acbc4f2ba5a7aaf607cde14ba472 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Mon, 29 Jul 2024 10:46:04 +0800 Subject: [PATCH 259/374] kselftests: dmabuf-heaps: Ensure the driver name is null-terminated [ Upstream commit 291e4baf70019f17a81b7b47aeb186b27d222159 ] Even if a vgem device is configured in, we will skip the import_vgem_fd() test almost every time. TAP version 13 1..11 # Testing heap: system # ======================================= # Testing allocation and importing: ok 1 # SKIP Could not open vgem -1 The problem is that we use the DRM_IOCTL_VERSION ioctl to query the driver version information but leave the name field a non-null-terminated string. Terminate it properly to actually test against the vgem device. While at it, let's check the length of the driver name is exactly 4 bytes and return early otherwise (in case there is a name like "vgemfoo" that gets converted to "vgem\0" unexpectedly). Signed-off-by: Zenghui Yu Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20240729024604.2046-1-yuzenghui@huawei.com Signed-off-by: Sasha Levin --- tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c index 5f541522364f..5d0a809dc2df 100644 --- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c +++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c @@ -29,9 +29,11 @@ static int check_vgem(int fd) version.name = name; ret = ioctl(fd, DRM_IOCTL_VERSION, &version); - if (ret) + if (ret || version.name_len != 4) return 0; + name[4] = '\0'; + return !strcmp(name, "vgem"); } From ee73a15d4a8ce8fb02d7866f7cf78fcdd16f0fcc Mon Sep 17 00:00:00 2001 From: Devyn Liu Date: Tue, 30 Jul 2024 11:20:40 +0800 Subject: [PATCH 260/374] spi: hisi-kunpeng: Add verification for the max_frequency provided by the firmware [ Upstream commit 5127c42c77de18651aa9e8e0a3ced190103b449c ] If the value of max_speed_hz is 0, it may cause a division by zero error in hisi_calc_effective_speed(). The value of max_speed_hz is provided by firmware. Firmware is generally considered as a trusted domain. However, as division by zero errors can cause system failure, for defense measure, the value of max_speed is validated here. So 0 is regarded as invalid and an error code is returned. 
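For illustration only (names are made up; the driver's real calculation is not shown in this patch): the failure mode being prevented is an integer divide-by-zero when a divider is derived from a firmware-reported max_speed_hz of 0.

    /* Simplified sketch of the hazard, assuming a non-zero bus clock:
     * a zero max_speed_hz would make the divider computation divide by
     * zero, which is why probe now rejects 0 up front. */
    static int calc_effective_speed(unsigned int clk_hz,
                                    unsigned int max_speed_hz,
                                    unsigned int *eff_hz)
    {
        unsigned int div;

        if (!max_speed_hz)
            return -EINVAL;	/* mirrors the check the patch adds at probe time */

        div = DIV_ROUND_UP(clk_hz, max_speed_hz);
        *eff_hz = clk_hz / div;
        return 0;
    }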
Signed-off-by: Devyn Liu Reviewed-by: Jay Fang Link: https://patch.msgid.link/20240730032040.3156393-3-liudingyuan@huawei.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-hisi-kunpeng.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c index 6910b4d4c427..16054695bdb0 100644 --- a/drivers/spi/spi-hisi-kunpeng.c +++ b/drivers/spi/spi-hisi-kunpeng.c @@ -481,6 +481,9 @@ static int hisi_spi_probe(struct platform_device *pdev) return -EINVAL; } + if (host->max_speed_hz == 0) + return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n"); + ret = device_property_read_u16(dev, "num-cs", &host->num_chipselect); if (ret) From 55b046ec8a4fca2a77880768d882c7e930b43f07 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 29 Jul 2024 21:59:24 +0200 Subject: [PATCH 261/374] btrfs: initialize location to fix -Wmaybe-uninitialized in btrfs_lookup_dentry() [ Upstream commit b8e947e9f64cac9df85a07672b658df5b2bcff07 ] Some arch + compiler combinations report a potentially unused variable location in btrfs_lookup_dentry(). This is a false alert as the variable is passed by value and always valid or there's an error. The compilers cannot probably reason about that although btrfs_inode_by_name() is in the same file. > + /kisskb/src/fs/btrfs/inode.c: error: 'location.objectid' may be used +uninitialized in this function [-Werror=maybe-uninitialized]: => 5603:9 > + /kisskb/src/fs/btrfs/inode.c: error: 'location.type' may be used +uninitialized in this function [-Werror=maybe-uninitialized]: => 5674:5 m68k-gcc8/m68k-allmodconfig mips-gcc8/mips-allmodconfig powerpc-gcc5/powerpc-all{mod,yes}config powerpc-gcc5/ppc64_defconfig Initialize it to zero, this should fix the warnings and won't change the behaviour as btrfs_inode_by_name() accepts only a root or inode item types, otherwise returns an error. 
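For context only (the key layout below is quoted from the uapi btrfs_tree.h header for reference): "= { 0 }" zero-fills every member, so the initializer only gives the compiler a defined value to reason about; every path that actually reads 'location' still obtains it from a successful btrfs_inode_by_name() first.

    /* Reference layout of the key being zero-initialized: */
    struct btrfs_key {
        __u64 objectid;
        __u8  type;
        __u64 offset;
    } __attribute__((__packed__));

    struct btrfs_key location = { 0 };	/* objectid = 0, type = 0, offset = 0 */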
Reported-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/linux-btrfs/bd4e9928-17b3-9257-8ba7-6b7f9bbb639a@linux-m68k.org/ Reviewed-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c2f48fc159e5..2951aa0039fc 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5699,7 +5699,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) struct inode *inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *sub_root = root; - struct btrfs_key location; + struct btrfs_key location = { 0 }; u8 di_type = 0; int ret = 0; From d21f3480b6db9c64656981d80c2bf517b6ef7334 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 29 Jul 2024 13:06:43 +0200 Subject: [PATCH 262/374] s390/vmlinux.lds.S: Move ro_after_init section behind rodata section [ Upstream commit 75c10d5377d8821efafed32e4d72068d9c1f8ec0 ] The .data.rel.ro and .got section were added between the rodata and ro_after_init data section, which adds an RW mapping in between all RO mapping of the kernel image: ---[ Kernel Image Start ]--- 0x000003ffe0000000-0x000003ffe0e00000 14M PMD RO X 0x000003ffe0e00000-0x000003ffe0ec7000 796K PTE RO X 0x000003ffe0ec7000-0x000003ffe0f00000 228K PTE RO NX 0x000003ffe0f00000-0x000003ffe1300000 4M PMD RO NX 0x000003ffe1300000-0x000003ffe1331000 196K PTE RO NX 0x000003ffe1331000-0x000003ffe13b3000 520K PTE RW NX <--- 0x000003ffe13b3000-0x000003ffe13d5000 136K PTE RO NX 0x000003ffe13d5000-0x000003ffe1400000 172K PTE RW NX 0x000003ffe1400000-0x000003ffe1500000 1M PMD RW NX 0x000003ffe1500000-0x000003ffe1700000 2M PTE RW NX 0x000003ffe1700000-0x000003ffe1800000 1M PMD RW NX 0x000003ffe1800000-0x000003ffe187e000 504K PTE RW NX ---[ Kernel Image End ]--- Move the ro_after_init data section again right behind the rodata section to prevent interleaving RO and RW mappings: ---[ Kernel Image Start ]--- 0x000003ffe0000000-0x000003ffe0e00000 14M PMD RO X 0x000003ffe0e00000-0x000003ffe0ec7000 796K PTE RO X 0x000003ffe0ec7000-0x000003ffe0f00000 228K PTE RO NX 0x000003ffe0f00000-0x000003ffe1300000 4M PMD RO NX 0x000003ffe1300000-0x000003ffe1353000 332K PTE RO NX 0x000003ffe1353000-0x000003ffe1400000 692K PTE RW NX 0x000003ffe1400000-0x000003ffe1500000 1M PMD RW NX 0x000003ffe1500000-0x000003ffe1700000 2M PTE RW NX 0x000003ffe1700000-0x000003ffe1800000 1M PMD RW NX 0x000003ffe1800000-0x000003ffe187e000 504K PTE RW NX ---[ Kernel Image End ]--- Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/kernel/vmlinux.lds.S | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 52bd969b2828..779162c664c4 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -59,14 +59,6 @@ SECTIONS } :text = 0x0700 RO_DATA(PAGE_SIZE) - .data.rel.ro : { - *(.data.rel.ro .data.rel.ro.*) - } - .got : { - __got_start = .; - *(.got) - __got_end = .; - } . = ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ @@ -80,6 +72,15 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); __end_ro_after_init = .; + .data.rel.ro : { + *(.data.rel.ro .data.rel.ro.*) + } + .got : { + __got_start = .; + *(.got) + __got_end = .; + } + RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) .data.rel : { *(.data.rel*) From 48b2108efa205f4579052c27fba2b22cc6ad8aa0 Mon Sep 17 00:00:00 2001 From: Camila Alvarez Date: Tue, 30 Jul 2024 19:42:43 -0400 Subject: [PATCH 263/374] HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup [ Upstream commit a6e9c391d45b5865b61e569146304cff72821a5d ] report_fixup for the Cougar 500k Gaming Keyboard was not verifying that the report descriptor size was correct before accessing it Reported-by: syzbot+24c0361074799d02c452@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=24c0361074799d02c452 Signed-off-by: Camila Alvarez Reviewed-by: Silvan Jegen Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-cougar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c index cb8bd8aae15b..0fa785f52707 100644 --- a/drivers/hid/hid-cougar.c +++ b/drivers/hid/hid-cougar.c @@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void) static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (rdesc[2] == 0x09 && rdesc[3] == 0x02 && + if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 && (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) { hid_info(hdev, "usage count exceeds max: fixing up report descriptor\n"); From adb3e3c1ddb5a23b8b7122ef1913f528d728937c Mon Sep 17 00:00:00 2001 From: Olivier Sobrie Date: Tue, 23 Jul 2024 10:44:35 +0200 Subject: [PATCH 264/374] HID: amd_sfh: free driver_data after destroying hid device [ Upstream commit 97155021ae17b86985121b33cf8098bcde00d497 ] HID driver callbacks aren't called anymore once hid_destroy_device() has been called. Hence, hid driver_data should be freed only after the hid_destroy_device() function returned as driver_data is used in several callbacks. I observed a crash with kernel 6.10.0 on my T14s Gen 3, after enabling KASAN to debug memory allocation, I got this output: [ 13.050438] ================================================================== [ 13.054060] BUG: KASAN: slab-use-after-free in amd_sfh_get_report+0x3ec/0x530 [amd_sfh] [ 13.054809] psmouse serio1: trackpoint: Synaptics TrackPoint firmware: 0x02, buttons: 3/3 [ 13.056432] Read of size 8 at addr ffff88813152f408 by task (udev-worker)/479 [ 13.060970] CPU: 5 PID: 479 Comm: (udev-worker) Not tainted 6.10.0-arch1-2 #1 893bb55d7f0073f25c46adbb49eb3785fefd74b0 [ 13.063978] Hardware name: LENOVO 21CQCTO1WW/21CQCTO1WW, BIOS R22ET70W (1.40 ) 03/21/2024 [ 13.067860] Call Trace: [ 13.069383] input: TPPS/2 Synaptics TrackPoint as /devices/platform/i8042/serio1/input/input8 [ 13.071486] [ 13.071492] dump_stack_lvl+0x5d/0x80 [ 13.074870] snd_hda_intel 0000:33:00.6: enabling device (0000 -> 0002) [ 13.078296] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.082199] print_report+0x174/0x505 [ 13.085776] ? __pfx__raw_spin_lock_irqsave+0x10/0x10 [ 13.089367] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.093255] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.097464] kasan_report+0xc8/0x150 [ 13.101461] ? 
amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.105802] amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.110303] amdtp_hid_request+0xb8/0x110 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.114879] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.119450] sensor_hub_get_feature+0x1d3/0x540 [hid_sensor_hub 3f13be3016ff415bea03008d45d99da837ee3082] [ 13.124097] hid_sensor_parse_common_attributes+0x4d0/0xad0 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5] [ 13.127404] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.131925] ? __pfx_hid_sensor_parse_common_attributes+0x10/0x10 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5] [ 13.136455] ? _raw_spin_lock_irqsave+0x96/0xf0 [ 13.140197] ? __pfx__raw_spin_lock_irqsave+0x10/0x10 [ 13.143602] ? devm_iio_device_alloc+0x34/0x50 [industrialio 3d261d5e5765625d2b052be40e526d62b1d2123b] [ 13.147234] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.150446] ? __devm_add_action+0x167/0x1d0 [ 13.155061] hid_gyro_3d_probe+0x120/0x7f0 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172] [ 13.158581] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.161814] platform_probe+0xa2/0x150 [ 13.165029] really_probe+0x1e3/0x8a0 [ 13.168243] __driver_probe_device+0x18c/0x370 [ 13.171500] driver_probe_device+0x4a/0x120 [ 13.175000] __driver_attach+0x190/0x4a0 [ 13.178521] ? __pfx___driver_attach+0x10/0x10 [ 13.181771] bus_for_each_dev+0x106/0x180 [ 13.185033] ? __pfx__raw_spin_lock+0x10/0x10 [ 13.188229] ? __pfx_bus_for_each_dev+0x10/0x10 [ 13.191446] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.194382] bus_add_driver+0x29e/0x4d0 [ 13.197328] driver_register+0x1a5/0x360 [ 13.200283] ? __pfx_hid_gyro_3d_platform_driver_init+0x10/0x10 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172] [ 13.203362] do_one_initcall+0xa7/0x380 [ 13.206432] ? __pfx_do_one_initcall+0x10/0x10 [ 13.210175] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.213211] ? kasan_unpoison+0x44/0x70 [ 13.216688] do_init_module+0x238/0x750 [ 13.219696] load_module+0x5011/0x6af0 [ 13.223096] ? kasan_save_stack+0x30/0x50 [ 13.226743] ? kasan_save_track+0x14/0x30 [ 13.230080] ? kasan_save_free_info+0x3b/0x60 [ 13.233323] ? poison_slab_object+0x109/0x180 [ 13.236778] ? __pfx_load_module+0x10/0x10 [ 13.239703] ? poison_slab_object+0x109/0x180 [ 13.243070] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.245924] ? init_module_from_file+0x13d/0x150 [ 13.248745] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.251503] ? init_module_from_file+0xdf/0x150 [ 13.254198] init_module_from_file+0xdf/0x150 [ 13.256826] ? __pfx_init_module_from_file+0x10/0x10 [ 13.259428] ? kasan_save_track+0x14/0x30 [ 13.261959] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.264471] ? kasan_save_free_info+0x3b/0x60 [ 13.267026] ? poison_slab_object+0x109/0x180 [ 13.269494] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.271949] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.274324] ? _raw_spin_lock+0x85/0xe0 [ 13.276671] ? __pfx__raw_spin_lock+0x10/0x10 [ 13.278963] ? __rseq_handle_notify_resume+0x1a6/0xad0 [ 13.281193] idempotent_init_module+0x23b/0x650 [ 13.283420] ? __pfx_idempotent_init_module+0x10/0x10 [ 13.285619] ? __pfx___seccomp_filter+0x10/0x10 [ 13.287714] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.289828] ? __fget_light+0x57/0x420 [ 13.291870] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.293880] ? security_capable+0x74/0xb0 [ 13.295820] __x64_sys_finit_module+0xbe/0x130 [ 13.297874] do_syscall_64+0x82/0x190 [ 13.299898] ? 
srso_alias_return_thunk+0x5/0xfbef5 [ 13.301905] ? irqtime_account_irq+0x3d/0x1f0 [ 13.303877] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.305753] ? __irq_exit_rcu+0x4e/0x130 [ 13.307577] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.309489] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ 13.311371] RIP: 0033:0x7a21f96ade9d [ 13.313234] Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 63 de 0c 00 f7 d8 64 89 01 48 [ 13.317051] RSP: 002b:00007ffeae934e78 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 [ 13.319024] RAX: ffffffffffffffda RBX: 00005987276bfcf0 RCX: 00007a21f96ade9d [ 13.321100] RDX: 0000000000000004 RSI: 00007a21f8eda376 RDI: 000000000000001c [ 13.323314] RBP: 00007a21f8eda376 R08: 0000000000000001 R09: 00007ffeae934ec0 [ 13.325505] R10: 0000000000000050 R11: 0000000000000246 R12: 0000000000020000 [ 13.327637] R13: 00005987276c1250 R14: 0000000000000000 R15: 00005987276c4530 [ 13.329737] [ 13.333945] Allocated by task 139: [ 13.336111] kasan_save_stack+0x30/0x50 [ 13.336121] kasan_save_track+0x14/0x30 [ 13.336125] __kasan_kmalloc+0xaa/0xb0 [ 13.336129] amdtp_hid_probe+0xb1/0x440 [amd_sfh] [ 13.336138] amd_sfh_hid_client_init+0xb8a/0x10f0 [amd_sfh] [ 13.336144] sfh_init_work+0x47/0x120 [amd_sfh] [ 13.336150] process_one_work+0x673/0xeb0 [ 13.336155] worker_thread+0x795/0x1250 [ 13.336160] kthread+0x290/0x350 [ 13.336164] ret_from_fork+0x34/0x70 [ 13.336169] ret_from_fork_asm+0x1a/0x30 [ 13.338175] Freed by task 139: [ 13.340064] kasan_save_stack+0x30/0x50 [ 13.340072] kasan_save_track+0x14/0x30 [ 13.340076] kasan_save_free_info+0x3b/0x60 [ 13.340081] poison_slab_object+0x109/0x180 [ 13.340085] __kasan_slab_free+0x32/0x50 [ 13.340089] kfree+0xe5/0x310 [ 13.340094] amdtp_hid_remove+0xb2/0x160 [amd_sfh] [ 13.340102] amd_sfh_hid_client_deinit+0x324/0x640 [amd_sfh] [ 13.340107] amd_sfh_hid_client_init+0x94a/0x10f0 [amd_sfh] [ 13.340113] sfh_init_work+0x47/0x120 [amd_sfh] [ 13.340118] process_one_work+0x673/0xeb0 [ 13.340123] worker_thread+0x795/0x1250 [ 13.340127] kthread+0x290/0x350 [ 13.340132] ret_from_fork+0x34/0x70 [ 13.340136] ret_from_fork_asm+0x1a/0x30 [ 13.342482] The buggy address belongs to the object at ffff88813152f400 which belongs to the cache kmalloc-64 of size 64 [ 13.347357] The buggy address is located 8 bytes inside of freed 64-byte region [ffff88813152f400, ffff88813152f440) [ 13.347367] The buggy address belongs to the physical page: [ 13.355409] page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x13152f [ 13.355416] anon flags: 0x2ffff8000000000(node=0|zone=2|lastcpupid=0x1ffff) [ 13.355423] page_type: 0xffffefff(slab) [ 13.355429] raw: 02ffff8000000000 ffff8881000428c0 ffffea0004c43a00 0000000000000005 [ 13.355435] raw: 0000000000000000 0000000000200020 00000001ffffefff 0000000000000000 [ 13.355439] page dumped because: kasan: bad access detected [ 13.357295] Memory state around the buggy address: [ 13.357299] ffff88813152f300: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc [ 13.357303] ffff88813152f380: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc [ 13.357306] >ffff88813152f400: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc [ 13.357309] ^ [ 13.357311] ffff88813152f480: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc [ 13.357315] ffff88813152f500: 00 00 00 00 00 00 00 06 fc fc fc fc fc fc fc fc [ 13.357318] ================================================================== [ 13.357405] Disabling lock debugging due to kernel taint 
[ 13.383534] Oops: general protection fault, probably for non-canonical address 0xe0a1bc4140000013: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 13.383544] KASAN: maybe wild-memory-access in range [0x050e020a00000098-0x050e020a0000009f] [ 13.383551] CPU: 3 PID: 479 Comm: (udev-worker) Tainted: G B 6.10.0-arch1-2 #1 893bb55d7f0073f25c46adbb49eb3785fefd74b0 [ 13.383561] Hardware name: LENOVO 21CQCTO1WW/21CQCTO1WW, BIOS R22ET70W (1.40 ) 03/21/2024 [ 13.383565] RIP: 0010:amd_sfh_get_report+0x81/0x530 [amd_sfh] [ 13.383580] Code: 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 78 03 00 00 48 b8 00 00 00 00 00 fc ff df 4c 8b 63 08 49 8d 7c 24 10 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 1a 03 00 00 45 8b 74 24 10 45 [ 13.383585] RSP: 0018:ffff8881261f7388 EFLAGS: 00010212 [ 13.383592] RAX: dffffc0000000000 RBX: ffff88813152f400 RCX: 0000000000000002 [ 13.383597] RDX: 00a1c04140000013 RSI: 0000000000000008 RDI: 050e020a0000009b [ 13.383600] RBP: ffff88814d010000 R08: 0000000000000002 R09: fffffbfff3ddb8c0 [ 13.383604] R10: ffffffff9eedc607 R11: ffff88810ce98000 R12: 050e020a0000008b [ 13.383607] R13: ffff88814d010000 R14: dffffc0000000000 R15: 0000000000000004 [ 13.383611] FS: 00007a21f94d0880(0000) GS:ffff8887e7d80000(0000) knlGS:0000000000000000 [ 13.383615] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 13.383618] CR2: 00007e0014c438f0 CR3: 000000012614c000 CR4: 0000000000f50ef0 [ 13.383622] PKRU: 55555554 [ 13.383625] Call Trace: [ 13.383629] [ 13.383632] ? __die_body.cold+0x19/0x27 [ 13.383644] ? die_addr+0x46/0x70 [ 13.383652] ? exc_general_protection+0x150/0x240 [ 13.383664] ? asm_exc_general_protection+0x26/0x30 [ 13.383674] ? amd_sfh_get_report+0x81/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.383686] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.383697] amdtp_hid_request+0xb8/0x110 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38] [ 13.383706] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383713] sensor_hub_get_feature+0x1d3/0x540 [hid_sensor_hub 3f13be3016ff415bea03008d45d99da837ee3082] [ 13.383727] hid_sensor_parse_common_attributes+0x4d0/0xad0 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5] [ 13.383739] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383745] ? __pfx_hid_sensor_parse_common_attributes+0x10/0x10 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5] [ 13.383753] ? _raw_spin_lock_irqsave+0x96/0xf0 [ 13.383762] ? __pfx__raw_spin_lock_irqsave+0x10/0x10 [ 13.383768] ? devm_iio_device_alloc+0x34/0x50 [industrialio 3d261d5e5765625d2b052be40e526d62b1d2123b] [ 13.383790] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383795] ? __devm_add_action+0x167/0x1d0 [ 13.383806] hid_gyro_3d_probe+0x120/0x7f0 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172] [ 13.383818] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383826] platform_probe+0xa2/0x150 [ 13.383832] really_probe+0x1e3/0x8a0 [ 13.383838] __driver_probe_device+0x18c/0x370 [ 13.383844] driver_probe_device+0x4a/0x120 [ 13.383851] __driver_attach+0x190/0x4a0 [ 13.383857] ? __pfx___driver_attach+0x10/0x10 [ 13.383863] bus_for_each_dev+0x106/0x180 [ 13.383868] ? __pfx__raw_spin_lock+0x10/0x10 [ 13.383874] ? __pfx_bus_for_each_dev+0x10/0x10 [ 13.383880] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383887] bus_add_driver+0x29e/0x4d0 [ 13.383895] driver_register+0x1a5/0x360 [ 13.383902] ? __pfx_hid_gyro_3d_platform_driver_init+0x10/0x10 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172] [ 13.383910] do_one_initcall+0xa7/0x380 [ 13.383919] ? 
__pfx_do_one_initcall+0x10/0x10 [ 13.383927] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.383933] ? kasan_unpoison+0x44/0x70 [ 13.383943] do_init_module+0x238/0x750 [ 13.383955] load_module+0x5011/0x6af0 [ 13.383962] ? kasan_save_stack+0x30/0x50 [ 13.383968] ? kasan_save_track+0x14/0x30 [ 13.383973] ? kasan_save_free_info+0x3b/0x60 [ 13.383980] ? poison_slab_object+0x109/0x180 [ 13.383993] ? __pfx_load_module+0x10/0x10 [ 13.384007] ? poison_slab_object+0x109/0x180 [ 13.384012] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384018] ? init_module_from_file+0x13d/0x150 [ 13.384025] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384032] ? init_module_from_file+0xdf/0x150 [ 13.384037] init_module_from_file+0xdf/0x150 [ 13.384044] ? __pfx_init_module_from_file+0x10/0x10 [ 13.384050] ? kasan_save_track+0x14/0x30 [ 13.384055] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384060] ? kasan_save_free_info+0x3b/0x60 [ 13.384066] ? poison_slab_object+0x109/0x180 [ 13.384071] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384080] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384085] ? _raw_spin_lock+0x85/0xe0 [ 13.384091] ? __pfx__raw_spin_lock+0x10/0x10 [ 13.384096] ? __rseq_handle_notify_resume+0x1a6/0xad0 [ 13.384106] idempotent_init_module+0x23b/0x650 [ 13.384114] ? __pfx_idempotent_init_module+0x10/0x10 [ 13.384120] ? __pfx___seccomp_filter+0x10/0x10 [ 13.384129] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384135] ? __fget_light+0x57/0x420 [ 13.384142] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384147] ? security_capable+0x74/0xb0 [ 13.384157] __x64_sys_finit_module+0xbe/0x130 [ 13.384164] do_syscall_64+0x82/0x190 [ 13.384174] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384179] ? irqtime_account_irq+0x3d/0x1f0 [ 13.384188] ? srso_alias_return_thunk+0x5/0xfbef5 [ 13.384193] ? __irq_exit_rcu+0x4e/0x130 [ 13.384201] ? 
srso_alias_return_thunk+0x5/0xfbef5 [ 13.384206] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ 13.384212] RIP: 0033:0x7a21f96ade9d [ 13.384263] Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 63 de 0c 00 f7 d8 64 89 01 48 [ 13.384267] RSP: 002b:00007ffeae934e78 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 [ 13.384273] RAX: ffffffffffffffda RBX: 00005987276bfcf0 RCX: 00007a21f96ade9d [ 13.384277] RDX: 0000000000000004 RSI: 00007a21f8eda376 RDI: 000000000000001c [ 13.384280] RBP: 00007a21f8eda376 R08: 0000000000000001 R09: 00007ffeae934ec0 [ 13.384284] R10: 0000000000000050 R11: 0000000000000246 R12: 0000000000020000 [ 13.384288] R13: 00005987276c1250 R14: 0000000000000000 R15: 00005987276c4530 [ 13.384297] [ 13.384299] Modules linked in: soundwire_amd(+) hid_sensor_gyro_3d(+) hid_sensor_magn_3d hid_sensor_accel_3d soundwire_generic_allocation amdxcp hid_sensor_trigger drm_exec industrialio_triggered_buffer soundwire_bus gpu_sched kvm_amd kfifo_buf qmi_helpers joydev drm_buddy hid_sensor_iio_common mousedev snd_soc_core industrialio i2c_algo_bit mac80211 snd_compress drm_suballoc_helper kvm snd_hda_intel drm_ttm_helper ac97_bus snd_pcm_dmaengine snd_intel_dspcfg ttm thinkpad_acpi(+) snd_intel_sdw_acpi hid_sensor_hub snd_rpl_pci_acp6x drm_display_helper snd_hda_codec hid_multitouch libarc4 snd_acp_pci platform_profile think_lmi(+) hid_generic firmware_attributes_class wmi_bmof cec snd_acp_legacy_common sparse_keymap rapl snd_hda_core psmouse cfg80211 pcspkr snd_pci_acp6x snd_hwdep video snd_pcm snd_pci_acp5x snd_timer snd_rn_pci_acp3x ucsi_acpi snd_acp_config snd sp5100_tco rfkill snd_soc_acpi typec_ucsi thunderbolt amd_sfh k10temp mhi soundcore i2c_piix4 snd_pci_acp3x typec i2c_hid_acpi roles i2c_hid wmi acpi_tad amd_pmc [ 13.384454] mac_hid i2c_dev crypto_user loop nfnetlink zram ip_tables x_tables dm_crypt cbc encrypted_keys trusted asn1_encoder tee dm_mod crct10dif_pclmul crc32_pclmul polyval_clmulni polyval_generic gf128mul ghash_clmulni_intel serio_raw sha512_ssse3 atkbd sha256_ssse3 libps2 sha1_ssse3 vivaldi_fmap nvme aesni_intel crypto_simd nvme_core cryptd ccp xhci_pci i8042 nvme_auth xhci_pci_renesas serio vfat fat btrfs blake2b_generic libcrc32c crc32c_generic crc32c_intel xor raid6_pq [ 13.384552] ---[ end trace 0000000000000000 ]--- KASAN reports a use-after-free of hid->driver_data in function amd_sfh_get_report(). The backtrace indicates that the function is called by amdtp_hid_request() which is one of the callbacks of hid device. The current make sure that driver_data is freed only once hid_destroy_device() returned. Note that I observed the crash both on v6.9.9 and v6.10.0. The code seems to be as it was from the early days of the driver. 
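For illustration only (the two-line diff below is the actual fix): the teardown rule being applied is that hid->driver_data must stay allocated until hid_destroy_device() has returned, because callbacks such as the amdtp_hid_request() path in the trace above can still run until then.

    /* Sketch of the corrected ordering: stash the pointer, destroy the
     * device (after which no more HID callbacks can run), then free. */
    struct amdtp_hid_data *hid_data = hid_sensor_hub->driver_data;

    hid_destroy_device(hid_sensor_hub);
    kfree(hid_data);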
Signed-off-by: Olivier Sobrie Acked-by: Basavaraj Natikar Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/amd-sfh-hid/amd_sfh_hid.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c index 705b52337068..81f3024b7b1b 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c @@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data) void amdtp_hid_remove(struct amdtp_cl_data *cli_data) { int i; + struct amdtp_hid_data *hid_data; for (i = 0; i < cli_data->num_hid_devices; ++i) { if (cli_data->hid_sensor_hubs[i]) { - kfree(cli_data->hid_sensor_hubs[i]->driver_data); + hid_data = cli_data->hid_sensor_hubs[i]->driver_data; hid_destroy_device(cli_data->hid_sensor_hubs[i]); + kfree(hid_data); cli_data->hid_sensor_hubs[i] = NULL; } } From d76fc0f0b18d49b7e721c9e4975ef4bffde2f3e7 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 4 Aug 2024 17:50:25 -0700 Subject: [PATCH 265/374] Input: uinput - reject requests with unreasonable number of slots [ Upstream commit 206f533a0a7c683982af473079c4111f4a0f9f5e ] From: Dmitry Torokhov When exercising uinput interface syzkaller may try setting up device with a really large number of slots, which causes memory allocation failure in input_mt_init_slots(). While this allocation failure is handled properly and request is rejected, it results in syzkaller reports. Additionally, such request may put undue burden on the system which will try to free a lot of memory for a bogus request. Fix it by limiting allowed number of slots to 100. This can easily be extended if we see devices that can track more than 100 contacts. Reported-by: Tetsuo Handa Reported-by: syzbot Closes: https://syzkaller.appspot.com/bug?extid=0122fa359a69694395d5 Link: https://lore.kernel.org/r/Zqgi7NYEbpRsJfa2@google.com Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin --- drivers/input/misc/uinput.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index d98212d55108..2c973f15cab7 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, return -EINVAL; } + /* + * Limit number of contacts to a reasonable value (100). This + * ensures that we need less than 2 pages for struct input_mt + * (we are not using in-kernel slot assignment so not going to + * allocate memory for the "red" table), and we should have no + * trouble getting this much memory. + */ + if (code == ABS_MT_SLOT && max > 99) { + printk(KERN_DEBUG + "%s: unreasonably large number of slots requested: %d\n", + UINPUT_NAME, max); + return -EINVAL; + } + return 0; } From 51263e8399292aa4633b5a7824401998ada64256 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 6 Aug 2024 19:28:05 +0200 Subject: [PATCH 266/374] usbnet: ipheth: race between ipheth_close and error handling [ Upstream commit e5876b088ba03a62124266fa20d00e65533c7269 ] ipheth_sndbulk_callback() can submit carrier_work as a part of its error handling. That means that the driver must make sure that the work is cancelled after it has made sure that no more URB can terminate with an error condition. Hence the order of actions in ipheth_close() needs to be inverted. Signed-off-by: Oliver Neukum Signed-off-by: Foster Snowhill Tested-by: Georgi Valkov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/ipheth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 687d70cfc556..6eeef10edada 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -475,8 +475,8 @@ static int ipheth_close(struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); - cancel_delayed_work_sync(&dev->carrier_work); netif_stop_queue(net); + cancel_delayed_work_sync(&dev->carrier_work); return 0; } From ef4e249971eb77ec33d74c5c3de1e2576faf6c90 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Mon, 12 Aug 2024 00:28:21 +0100 Subject: [PATCH 267/374] Squashfs: sanity check symbolic link size [ Upstream commit 810ee43d9cd245d138a2733d87a24858a23f577d ] Syzkiller reports a "KMSAN: uninit-value in pick_link" bug. This is caused by an uninitialised page, which is ultimately caused by a corrupted symbolic link size read from disk. The reason why the corrupted symlink size causes an uninitialised page is due to the following sequence of events: 1. squashfs_read_inode() is called to read the symbolic link from disk. This assigns the corrupted value 3875536935 to inode->i_size. 2. Later squashfs_symlink_read_folio() is called, which assigns this corrupted value to the length variable, which being a signed int, overflows producing a negative number. 3. The following loop that fills in the page contents checks that the copied bytes is less than length, which being negative means the loop is skipped, producing an uninitialised page. This patch adds a sanity check which checks that the symbolic link size is not larger than expected. -- Signed-off-by: Phillip Lougher Link: https://lore.kernel.org/r/20240811232821.13903-1-phillip@squashfs.org.uk Reported-by: Lizhi Xu Reported-by: syzbot+24ac24ff58dc5b0d26b9@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000a90e8c061e86a76b@google.com/ V2: fix spelling mistake. 
Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/squashfs/inode.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index 16bd693d0b3a..d5918eba27e3 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c @@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino) if (err < 0) goto failed_read; - set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); + if (inode->i_size > PAGE_SIZE) { + ERROR("Corrupted symlink\n"); + return -EINVAL; + } + + set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_op = &squashfs_symlink_inode_ops; inode_nohighmem(inode); inode->i_data.a_ops = &squashfs_symlink_aops; From bf68acd840b6a5bfd3777e0d5aaa204db6b461a9 Mon Sep 17 00:00:00 2001 From: Stefan Wiehler Date: Mon, 12 Aug 2024 12:06:51 +0200 Subject: [PATCH 268/374] of/irq: Prevent device address out-of-bounds read in interrupt map walk [ Upstream commit b739dffa5d570b411d4bdf4bb9b8dfd6b7d72305 ] When of_irq_parse_raw() is invoked with a device address smaller than the interrupt parent node (from #address-cells property), KASAN detects the following out-of-bounds read when populating the initial match table (dyndbg="func of_irq_parse_* +p"): OF: of_irq_parse_one: dev=/soc@0/picasso/watchdog, index=0 OF: parent=/soc@0/pci@878000000000/gpio0@17,0, intsize=2 OF: intspec=4 OF: of_irq_parse_raw: ipar=/soc@0/pci@878000000000/gpio0@17,0, size=2 OF: -> addrsize=3 ================================================================== BUG: KASAN: slab-out-of-bounds in of_irq_parse_raw+0x2b8/0x8d0 Read of size 4 at addr ffffff81beca5608 by task bash/764 CPU: 1 PID: 764 Comm: bash Tainted: G O 6.1.67-484c613561-nokia_sm_arm64 #1 Hardware name: Unknown Unknown Product/Unknown Product, BIOS 2023.01-12.24.03-dirty 01/01/2023 Call trace: dump_backtrace+0xdc/0x130 show_stack+0x1c/0x30 dump_stack_lvl+0x6c/0x84 print_report+0x150/0x448 kasan_report+0x98/0x140 __asan_load4+0x78/0xa0 of_irq_parse_raw+0x2b8/0x8d0 of_irq_parse_one+0x24c/0x270 parse_interrupts+0xc0/0x120 of_fwnode_add_links+0x100/0x2d0 fw_devlink_parse_fwtree+0x64/0xc0 device_add+0xb38/0xc30 of_device_add+0x64/0x90 of_platform_device_create_pdata+0xd0/0x170 of_platform_bus_create+0x244/0x600 of_platform_notify+0x1b0/0x254 blocking_notifier_call_chain+0x9c/0xd0 __of_changeset_entry_notify+0x1b8/0x230 __of_changeset_apply_notify+0x54/0xe4 of_overlay_fdt_apply+0xc04/0xd94 ... 
The buggy address belongs to the object at ffffff81beca5600 which belongs to the cache kmalloc-128 of size 128 The buggy address is located 8 bytes inside of 128-byte region [ffffff81beca5600, ffffff81beca5680) The buggy address belongs to the physical page: page:00000000230d3d03 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x1beca4 head:00000000230d3d03 order:1 compound_mapcount:0 compound_pincount:0 flags: 0x8000000000010200(slab|head|zone=2) raw: 8000000000010200 0000000000000000 dead000000000122 ffffff810000c300 raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffffff81beca5500: 04 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffff81beca5580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffffff81beca5600: 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffffff81beca5680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffff81beca5700: 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc ================================================================== OF: -> got it ! Prevent the out-of-bounds read by copying the device address into a buffer of sufficient size. Signed-off-by: Stefan Wiehler Link: https://lore.kernel.org/r/20240812100652.3800963-1-stefan.wiehler@nokia.com Signed-off-by: Rob Herring (Arm) Signed-off-by: Sasha Levin --- drivers/of/irq.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index c94203ce65bb..8fd63100ba8f 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar struct device_node *p; const __be32 *addr; u32 intsize; - int i, res; + int i, res, addr_len; + __be32 addr_buf[3] = { 0 }; pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index); @@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar return of_irq_parse_oldworld(device, index, out_irq); /* Get the reg property (if any) */ - addr = of_get_property(device, "reg", NULL); + addr = of_get_property(device, "reg", &addr_len); + + /* Prevent out-of-bounds read in case of longer interrupt parent address size */ + if (addr_len > (3 * sizeof(__be32))) + addr_len = 3 * sizeof(__be32); + if (addr) + memcpy(addr_buf, addr, addr_len); /* Try the new-style interrupts-extended first */ res = of_parse_phandle_with_args(device, "interrupts-extended", "#interrupt-cells", index, out_irq); if (!res) - return of_irq_parse_raw(addr, out_irq); + return of_irq_parse_raw(addr_buf, out_irq); /* Look for the interrupt parent. */ p = of_irq_find_parent(device); @@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Check if there are any interrupt-map translations to process */ - res = of_irq_parse_raw(addr, out_irq); + res = of_irq_parse_raw(addr_buf, out_irq); out: of_node_put(p); return res; From 0f078f8ca93b28a34e20bd050f12cd4efeee7c0f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 10 Aug 2024 21:04:35 -0400 Subject: [PATCH 269/374] lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc() [ Upstream commit b2f11c6f3e1fc60742673b8675c95b78447f3dae ] If we need to increase the tree depth, allocate a new node, and then race with another thread that increased the tree depth before us, we'll still have a preallocated node that might be used later. 
If we then use that node for a new non-root node, it'll still have a pointer to the old root instead of being zeroed - fix this by zeroing it in the cmpxchg failure path. Signed-off-by: Kent Overstreet Signed-off-by: Sasha Levin --- lib/generic-radix-tree.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c index aaefb9b678c8..fa692c86f069 100644 --- a/lib/generic-radix-tree.c +++ b/lib/generic-radix-tree.c @@ -121,6 +121,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset, if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) { v = new_root; new_node = NULL; + } else { + new_node->children[0] = NULL; } } From c33a9806dc806bcb4a31dc71fb06979219181ad4 Mon Sep 17 00:00:00 2001 From: Peiyang Wang Date: Tue, 13 Aug 2024 22:10:23 +0800 Subject: [PATCH 270/374] net: hns3: void array out of bound when loop tnl_num [ Upstream commit 86db7bfb06704ef17340eeae71c832f21cfce35c ] When query reg inf of SSU, it loops tnl_num times. However, tnl_num comes from hardware and the length of array is a fixed value. To void array out of bound, make sure the loop time is not greater than the length of array Signed-off-by: Peiyang Wang Signed-off-by: Jijie Shao Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index e132c2f09560..cc7f46c0b35f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1598,8 +1598,7 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev) { u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0}; struct hclge_mod_reg_common_msg msg; - u8 i, j, num; - u32 loop_time; + u8 i, j, num, loop_time; num = ARRAY_SIZE(hclge_ssu_reg_common_msg); for (i = 0; i < num; i++) { @@ -1609,7 +1608,8 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev) loop_time = 1; loop_para[0] = 0; if (msg.need_para) { - loop_time = hdev->ae_dev->dev_specs.tnl_num; + loop_time = min(hdev->ae_dev->dev_specs.tnl_num, + HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE); for (j = 0; j < loop_time; j++) loop_para[j] = j + 1; } From d1207f07decc66546a7fa463d2f335a856c986ef Mon Sep 17 00:00:00 2001 From: Ivan Orlov Date: Thu, 15 Aug 2024 01:04:31 +0100 Subject: [PATCH 271/374] kunit/overflow: Fix UB in overflow_allocation_test [ Upstream commit 92e9bac18124682c4b99ede9ee3bcdd68f121e92 ] The 'device_name' array doesn't exist out of the 'overflow_allocation_test' function scope. However, it is being used as a driver name when calling 'kunit_driver_create' from 'kunit_device_register'. It produces the kernel panic with KASAN enabled. Since this variable is used in one place only, remove it and pass the device name into kunit_device_register directly as an ascii string. 
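As an aside, a minimal sketch of why a pointer to a function-local array must not be retained; register_driver_name() and stored_name are hypothetical stand-ins for the kunit plumbing, not the real API:

#include <stdio.h>

static const char *stored_name;            /* hypothetical: consumer keeps the pointer */

static void register_driver_name(const char *name)
{
	stored_name = name;                /* pointer may outlive the caller's frame */
}

static void broken(void)
{
	const char device_name[] = "overflow-test";  /* automatic storage */

	register_driver_name(device_name); /* dangles once broken() returns */
}

static void fixed(void)
{
	register_driver_name("overflow-test");  /* string literal: static storage */
}

int main(void)
{
	broken();                          /* stored_name must not be used now */
	fixed();
	printf("%s\n", stored_name);       /* safe: the literal lives for the whole program */
	return 0;
}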
Signed-off-by: Ivan Orlov Reviewed-by: David Gow Link: https://lore.kernel.org/r/20240815000431.401869-1-ivan.orlov0322@gmail.com Signed-off-by: Kees Cook Signed-off-by: Sasha Levin --- lib/overflow_kunit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index d305b0c054bb..9249181fff37 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -668,7 +668,6 @@ DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); static void overflow_allocation_test(struct kunit *test) { - const char device_name[] = "overflow-test"; struct device *dev; int count = 0; @@ -678,7 +677,7 @@ static void overflow_allocation_test(struct kunit *test) } while (0) /* Create dummy device for devm_kmalloc()-family tests. */ - dev = kunit_device_register(test, device_name); + dev = kunit_device_register(test, "overflow-test"); KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), "Cannot register test device\n"); From 189d3ed3b25beee26ffe2abed278208bece13f52 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Tue, 13 Aug 2024 10:59:08 +0100 Subject: [PATCH 272/374] MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is installed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 50f2b98dc83de7809a5c5bf0ccf9af2e75c37c13 ] This avoids warning: [ 0.118053] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283 Caused by get_c0_compare_int on secondary CPU. We also skipped saving IRQ number to struct clock_event_device *cd as it's never used by clockevent core, as per comments it's only meant for "non CPU local devices". Reported-by: Serge Semin Closes: https://lore.kernel.org/linux-mips/6szkkqxpsw26zajwysdrwplpjvhl5abpnmxgu2xuj3dkzjnvsf@4daqrz4mf44k/ Signed-off-by: Jiaxun Yang Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Serge Semin Tested-by: Serge Semin Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/kernel/cevt-r4k.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 368e8475870f..5f6e9e2ebbdb 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -303,13 +303,6 @@ int r4k_clockevent_init(void) if (!c0_compare_int_usable()) return -ENXIO; - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of its liking. - */ - irq = get_c0_compare_int(); - cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; @@ -320,7 +313,6 @@ int r4k_clockevent_init(void) min_delta = calculate_min_delta(); cd->rating = 300; - cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->event_handler = mips_event_handler; @@ -332,6 +324,13 @@ int r4k_clockevent_init(void) cp0_timer_irq_installed = 1; + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of its liking. 
+ */ + irq = get_c0_compare_int(); + if (request_irq(irq, c0_compare_interrupt, flags, "timer", c0_compare_interrupt)) pr_err("Failed to request irq %d (timer)\n", irq); From 9cb25a390a4194949981221253ff18077c64a710 Mon Sep 17 00:00:00 2001 From: Carlos Song Date: Tue, 20 Aug 2024 15:06:58 +0800 Subject: [PATCH 273/374] spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register [ Upstream commit 783bf5d09f86b9736605f3e01a3472e55ef98ff8 ] Referring to the errata ERR051608 of I.MX93, LPSPI TCR[PRESCALE] can only be configured to be 0 or 1, other values are not valid and will cause LPSPI to not work. Add the prescale limitation for LPSPI in I.MX93. Other platforms are not affected. Signed-off-by: Carlos Song Link: https://patch.msgid.link/20240820070658.672127-1-carlos.song@nxp.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-fsl-lpspi.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index f2d7eedd324b..30d56f8775d7 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -82,6 +82,10 @@ #define TCR_RXMSK BIT(19) #define TCR_TXMSK BIT(18) +struct fsl_lpspi_devtype_data { + u8 prescale_max; +}; + struct lpspi_config { u8 bpw; u8 chip_select; @@ -119,10 +123,25 @@ struct fsl_lpspi_data { bool usedma; struct completion dma_rx_completion; struct completion dma_tx_completion; + + const struct fsl_lpspi_devtype_data *devtype_data; +}; + +/* + * ERR051608 fixed or not: + * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf + */ +static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = { + .prescale_max = 1, +}; + +static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = { + .prescale_max = 8, }; static const struct of_device_id fsl_lpspi_dt_ids[] = { - { .compatible = "fsl,imx7ulp-spi", }, + { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,}, + { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); @@ -297,9 +316,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) { struct lpspi_config config = fsl_lpspi->config; unsigned int perclk_rate, scldiv, div; + u8 prescale_max; u8 prescale; perclk_rate = clk_get_rate(fsl_lpspi->clk_per); + prescale_max = fsl_lpspi->devtype_data->prescale_max; if (!config.speed_hz) { dev_err(fsl_lpspi->dev, @@ -315,7 +336,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) div = DIV_ROUND_UP(perclk_rate, config.speed_hz); - for (prescale = 0; prescale < 8; prescale++) { + for (prescale = 0; prescale < prescale_max; prescale++) { scldiv = div / (1 << prescale) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; @@ -822,6 +843,7 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi) static int fsl_lpspi_probe(struct platform_device *pdev) { + const struct fsl_lpspi_devtype_data *devtype_data; struct fsl_lpspi_data *fsl_lpspi; struct spi_controller *controller; struct resource *res; @@ -830,6 +852,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev) u32 temp; bool is_target; + devtype_data = of_device_get_match_data(&pdev->dev); + if (!devtype_data) + return -ENODEV; + is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave"); if (is_target) controller = devm_spi_alloc_target(&pdev->dev, @@ -848,6 +874,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->is_target = is_target; fsl_lpspi->is_only_cs1 = 
of_property_read_bool((&pdev->dev)->of_node, "fsl,spi-only-use-cs1-sel"); + fsl_lpspi->devtype_data = devtype_data; init_completion(&fsl_lpspi->xfer_done); From b8186f5cfa4fe7ebb018699105267c0b7d3c2569 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 20 Aug 2024 13:04:07 +1000 Subject: [PATCH 274/374] ata: pata_macio: Use WARN instead of BUG [ Upstream commit d4bc0a264fb482b019c84fbc7202dd3cab059087 ] The overflow/underflow conditions in pata_macio_qc_prep() should never happen. But if they do there's no need to kill the system entirely, a WARN and failing the IO request should be sufficient and might allow the system to keep running. Signed-off-by: Michael Ellerman Signed-off-by: Damien Le Moal Signed-off-by: Sasha Levin --- drivers/ata/pata_macio.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 99fc5d9d95d7..cac022eb1492 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -554,7 +554,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) while (sg_len) { /* table overflow should never happen */ - BUG_ON (pi++ >= MAX_DCMDS); + if (WARN_ON_ONCE(pi >= MAX_DCMDS)) + return AC_ERR_SYSTEM; len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG; table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE); @@ -566,11 +567,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) addr += len; sg_len -= len; ++table; + ++pi; } } /* Should never happen according to Tejun */ - BUG_ON(!pi); + if (WARN_ON_ONCE(!pi)) + return AC_ERR_SYSTEM; /* Convert the last command to an input/output */ table--; From 3b692794b81f2ecad69a4adbba687f3836824ada Mon Sep 17 00:00:00 2001 From: ChenXiaoSong Date: Thu, 22 Aug 2024 08:20:51 +0000 Subject: [PATCH 275/374] smb/server: fix potential null-ptr-deref of lease_ctx_info in smb2_open() [ Upstream commit 4e8771a3666c8f216eefd6bd2fd50121c6c437db ] null-ptr-deref will occur when (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) and parse_lease_state() return NULL. Fix this by check if 'lease_ctx_info' is NULL. Additionally, remove the redundant parentheses in parse_durable_handle_context(). 
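The shape of the fix is the usual guard on a parser that may return NULL; a minimal sketch with a hypothetical parse()/lease_ctx pair rather than the ksmbd functions:

#include <stdio.h>
#include <stdlib.h>

struct lease_ctx { int req_state; };

/* Hypothetical parser: legitimately returns NULL on malformed input. */
static struct lease_ctx *parse(int valid)
{
	return valid ? calloc(1, sizeof(struct lease_ctx)) : NULL;
}

int main(void)
{
	struct lease_ctx *lc = parse(0);

	/* Check before every dereference; touching lc when the parse failed
	 * is exactly the null-ptr-deref the patch closes. */
	if (lc)
		lc->req_state = 1;
	else
		fprintf(stderr, "lease context missing, falling back\n");

	free(lc);
	return 0;
}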
Signed-off-by: ChenXiaoSong Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/server/oplock.c | 2 +- fs/smb/server/smb2pdu.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index a8f52c4ebbda..e546ffa57b55 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -1510,7 +1510,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease) * parse_lease_state() - parse lease context containted in file open request * @open_req: buffer containing smb2 file open(create) request * - * Return: oplock state, -ENOENT if create lease context not found + * Return: allocated lease context object on success, otherwise NULL */ struct lease_ctx_info *parse_lease_state(void *open_req) { diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index bc69b94df40f..39dfecf082ba 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -2771,8 +2771,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work, } } - if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || - req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { + if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || + req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { dh_info->CreateGuid = durable_v2_blob->CreateGuid; dh_info->persistent = @@ -2792,8 +2792,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; } - if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || - req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { + if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || + req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { ksmbd_debug(SMB, "Request for durable open\n"); dh_info->type = dh_idx; } @@ -3415,7 +3415,7 @@ int smb2_open(struct ksmbd_work *work) goto err_out1; } } else { - if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { + if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) { if (S_ISDIR(file_inode(filp)->i_mode)) { lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE; lc->is_dir = true; From f9f0b593baeff92766d424e97112a63b4f25b621 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 21 Aug 2024 14:05:00 -0400 Subject: [PATCH 276/374] NFSv4: Add missing rescheduling points in nfs_client_return_marked_delegations [ Upstream commit a017ad1313fc91bdf235097fd0a02f673fc7bb11 ] We're seeing reports of soft lockups when iterating through the loops, so let's add rescheduling points. Signed-off-by: Trond Myklebust Reviewed-by: Jeff Layton Signed-off-by: Anna Schumaker Signed-off-by: Sasha Levin --- fs/nfs/super.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index cbbd4866b0b7..97b386032b71 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -228,6 +229,7 @@ static int __nfs_list_for_each_server(struct list_head *head, ret = fn(server, data); if (ret) goto out; + cond_resched(); rcu_read_lock(); } rcu_read_unlock(); From 1f490704c6169e95585c1cf47299dbf54d401469 Mon Sep 17 00:00:00 2001 From: Yunxiang Li Date: Mon, 22 Apr 2024 14:59:02 -0400 Subject: [PATCH 277/374] drm/amdgpu: Fix two reset triggered in a row [ Upstream commit f4322b9f8ad5f9f62add288c785d2e10bb6a5efe ] Some times a hang GPU causes multiple reset sources to schedule resets. The second source will be able to trigger an unnecessary reset if they schedule after we call amdgpu_device_stop_pending_resets. Move amdgpu_device_stop_pending_resets to after the reset is done. 
Since at this point the GPU is supposedly in a good state, any reset scheduled after this point would be a legitimate reset. Remove unnecessary and incorrect checks for amdgpu_in_reset that was kinda serving this purpose. Signed-off-by: Yunxiang Li Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher Stable-dep-of: 6e4aa08fa9c6 ("drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic") Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 ++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d24d7a108624..9f7f96be1ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5072,8 +5072,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, retry: amdgpu_amdkfd_pre_reset(adev); - amdgpu_device_stop_pending_resets(adev); - if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -5828,13 +5826,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, r, adev_to_drm(tmp_adev)->unique); tmp_adev->asic_reset_res = r; } - - if (!amdgpu_sriov_vf(tmp_adev)) - /* - * Drop all pending non scheduler resets. Scheduler resets - * were already dropped during drm_sched_stop - */ - amdgpu_device_stop_pending_resets(tmp_adev); } /* Actual ASIC resets if needed.*/ @@ -5856,6 +5847,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, goto retry; } + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* + * Drop any pending non scheduler resets queued before reset is done. + * Any reset scheduled after this point would be valid. Scheduler resets + * were already dropped during drm_sched_stop and no new ones can come + * in before drm_sched_start. + */ + amdgpu_device_stop_pending_resets(tmp_adev); + } + skip_hw_reset: /* Post ASIC reset for all devs .*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 26cea0076c9b..e12d179a451b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -601,7 +601,7 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) if (ret) { adev->virt.vf2pf_update_retry_cnt++; if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && - amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { + amdgpu_sriov_runtime(adev)) { amdgpu_ras_set_fed(adev, true); if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->kfd.reset_work)) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 0c7275bca8f7..c5ba9c4757a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -319,7 +319,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, switch (event) { case IDH_FLR_NOTIFICATION: - if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) + if (amdgpu_sriov_runtime(adev)) WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, &adev->virt.flr_work), "Failed to queue work! 
at %s", diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index aba00d961627..fa9d1b02f391 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -358,7 +358,7 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, switch (event) { case IDH_FLR_NOTIFICATION: - if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) + if (amdgpu_sriov_runtime(adev)) WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, &adev->virt.flr_work), "Failed to queue work! at %s", diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 59f53c743362..14a065516ae4 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -560,7 +560,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev, r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); /* only handle FLR_NOTIFY now */ - if (!r && !amdgpu_in_reset(adev)) + if (!r) WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, &adev->virt.flr_work), "Failed to queue work! at %s", From 3adb4ae45e42a9f537071a03f9814f9c6c4f32ed Mon Sep 17 00:00:00 2001 From: Yunxiang Li Date: Mon, 22 Apr 2024 14:44:38 -0400 Subject: [PATCH 278/374] drm/amdgpu: Add reset_context flag for host FLR [ Upstream commit 25c01191c2555351922e5515b6b6d31357975031 ] There are other reset sources that pass NULL as the job pointer, such as amdgpu_amdkfd_reset_work. Therefore, using the job pointer to check if the FLR comes from the host does not work. Add a flag in reset_context to explicitly mark host triggered reset, and set this flag when we receive host reset notification. Signed-off-by: Yunxiang Li Reviewed-by: Emily Deng Reviewed-by: Zhigang Luo Signed-off-by: Alex Deucher Stable-dep-of: 6e4aa08fa9c6 ("drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic") Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 ++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1 + drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 1 + drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 1 + drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1 + 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9f7f96be1ac7..bd6f2aba0662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5057,13 +5057,13 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * * @adev: amdgpu_device pointer - * @from_hypervisor: request from hypervisor + * @reset_context: amdgpu reset context pointer * * do VF FLR and reinitialize Asic * return 0 means succeeded otherwise failed */ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, - bool from_hypervisor) + struct amdgpu_reset_context *reset_context) { int r; struct amdgpu_hive_info *hive = NULL; @@ -5072,12 +5072,15 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, retry: amdgpu_amdkfd_pre_reset(adev); - if (from_hypervisor) + if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { + clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); r = amdgpu_virt_request_full_gpu(adev, true); - else + } else { r = amdgpu_virt_reset_gpu(adev); + } if (r) return r; + amdgpu_ras_set_fed(adev, false); amdgpu_irq_gpu_reset_resume_helper(adev); @@ -5831,7 +5834,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* Actual ASIC resets if needed.*/ /* 
Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { - r = amdgpu_device_reset_sriov(adev, job ? false : true); + r = amdgpu_device_reset_sriov(adev, reset_context); if (r) adev->asic_reset_res = r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index b11d190ece53..5a9cc043b858 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -33,6 +33,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, AMDGPU_SKIP_COREDUMP = 2, + AMDGPU_HOST_FLR = 3, }; struct amdgpu_reset_context { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index c5ba9c4757a8..f4c47492e0cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -292,6 +292,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_HOST_FLR, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index fa9d1b02f391..14cc7910e5cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -328,6 +328,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_HOST_FLR, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 14a065516ae4..78cd07744ebe 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -529,6 +529,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_HOST_FLR, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } From e30b013e24da0f23b5a102f5208bdfdaaa4fd05e Mon Sep 17 00:00:00 2001 From: Yunxiang Li Date: Mon, 22 Apr 2024 15:04:52 -0400 Subject: [PATCH 279/374] drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic [ Upstream commit 6e4aa08fa9c6c0c027fc86f242517c925d159393 ] The retry loop for SRIOV reset have refcount and memory leak issue. Depending on which function call fails it can potentially call amdgpu_amdkfd_pre/post_reset different number of times and causes kfd_locked count to be wrong. This will block all future attempts at opening /dev/kfd. The retry loop also leakes resources by calling amdgpu_virt_init_data_exchange multiple times without calling the corresponding fini function. Align with the bare-metal reset path which doesn't have these issues. This means taking the amdgpu_amdkfd_pre/post_reset functions out of the reset loop and calling amdgpu_device_pre_asic_reset each retry which properly free the resources from previous try by calling amdgpu_virt_fini_data_exchange. 
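The structural point, per-attempt teardown inside the loop and the pre/post hooks outside it, can be sketched independently of amdgpu; acquire(), release() and reset_once() below are illustrative names, not driver APIs:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRY 3

static bool acquire(void)     { puts("acquire full GPU access"); return true; }
static void release(void)     { puts("release full GPU access"); }
static int  reset_once(int n) { return n < 2 ? -11 /* -EAGAIN */ : 0; }

int main(void)
{
	int retries = MAX_RETRY, attempt = 0, r;

	puts("pre_reset()");               /* runs exactly once, not once per retry */
again:
	if (!acquire())
		return 1;
	r = reset_once(attempt++);
	if (r == -11 && retries-- > 0) {
		release();                 /* undo this attempt before looping */
		goto again;
	}
	puts("post_reset()");              /* also exactly once, on the final outcome */
	release();
	return r ? 1 : 0;
}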
Signed-off-by: Yunxiang Li Reviewed-by: Emily Deng Reviewed-by: Zhigang Luo Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 47 ++++++++++------------ 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bd6f2aba0662..e66546df0bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5067,10 +5067,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, { int r; struct amdgpu_hive_info *hive = NULL; - int retry_limit = 0; - -retry: - amdgpu_amdkfd_pre_reset(adev); if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); @@ -5090,7 +5086,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) - goto error; + return r; amdgpu_virt_init_data_exchange(adev); @@ -5101,38 +5097,35 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, /* now we are okay to resume SMC/CP/SDMA */ r = amdgpu_device_ip_reinit_late_sriov(adev); if (r) - goto error; + return r; hive = amdgpu_get_xgmi_hive(adev); /* Update PSP FW topology after reset */ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) r = amdgpu_xgmi_update_topology(hive, adev); - if (hive) amdgpu_put_xgmi_hive(hive); + if (r) + return r; - if (!r) { - r = amdgpu_ib_ring_tests(adev); - - amdgpu_amdkfd_post_reset(adev); - } + r = amdgpu_ib_ring_tests(adev); + if (r) + return r; -error: - if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { + if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { amdgpu_inc_vram_lost(adev); r = amdgpu_device_recover_vram(adev); } - amdgpu_virt_release_full_gpu(adev, true); + if (r) + return r; - if (AMDGPU_RETRY_SRIOV_RESET(r)) { - if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) { - retry_limit++; - goto retry; - } else - DRM_ERROR("GPU reset retry is beyond the retry limit\n"); - } + /* need to be called during full access so we can't do it later like + * bare-metal does. + */ + amdgpu_amdkfd_post_reset(adev); + amdgpu_virt_release_full_gpu(adev, true); - return r; + return 0; } /** @@ -5694,6 +5687,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, int i, r = 0; bool need_emergency_restart = false; bool audio_suspended = false; + int retry_limit = AMDGPU_MAX_RETRY_LIMIT; /* * Special case: RAS triggered and full reset isn't supported @@ -5775,8 +5769,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - if (!amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_pre_reset(tmp_adev); + amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reseted as untracked first @@ -5835,6 +5828,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, reset_context); + if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { + amdgpu_virt_release_full_gpu(adev, true); + goto retry; + } if (r) adev->asic_reset_res = r; From 98d1b6b48628db5dbd4d0f5d6827222c807de8e8 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Thu, 4 Jul 2024 17:00:19 +0200 Subject: [PATCH 280/374] fs: only copy to userspace on success in listmount() commit 8d42877ad65b02741c9099392a001b7209baa5d4 upstream. 
Avoid copying when we failed to, or didn't have any mounts to list. Fixes: cb54ef4f050e ("fs: don't copy to userspace under namespace semaphore") # mainline only Signed-off-by: Christian Brauner Signed-off-by: Greg Kroah-Hartman --- fs/namespace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/namespace.c b/fs/namespace.c index 4494064205a6..1390e9e521d6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5138,6 +5138,8 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, scoped_guard(rwsem_read, &namespace_sem) ret = do_listmount(kreq.mnt_id, kreq.param, kmnt_ids, nr_mnt_ids); + if (ret <= 0) + return ret; if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids))) return -EFAULT; From 84dc375c8350a3e11e1fc1f9e23f02dd67fc49fd Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Wed, 24 Jul 2024 12:25:16 +0200 Subject: [PATCH 281/374] tcp: process the 3rd ACK with sk_socket for TFO/MPTCP commit c1668292689ad2ee16c9c1750a8044b0b0aad663 upstream. The 'Fixes' commit recently changed the behaviour of TCP by skipping the processing of the 3rd ACK when a sk->sk_socket is set. The goal was to skip tcp_ack_snd_check() in tcp_rcv_state_process() not to send an unnecessary ACK in case of simultaneous connect(). Unfortunately, that had an impact on TFO and MPTCP. I started to look at the impact on MPTCP, because the MPTCP CI found some issues with the MPTCP Packetdrill tests [1]. Then Paolo Abeni suggested me to look at the impact on TFO with "plain" TCP. For MPTCP, when receiving the 3rd ACK of a request adding a new path (MP_JOIN), sk->sk_socket will be set, and point to the MPTCP sock that has been created when the MPTCP connection got established before with the first path. The newly added 'goto' will then skip the processing of the segment text (step 7) and not go through tcp_data_queue() where the MPTCP options are validated, and some actions are triggered, e.g. sending the MPJ 4th ACK [2] as demonstrated by the new errors when running a packetdrill test [3] establishing a second subflow. This doesn't fully break MPTCP, mainly the 4th MPJ ACK that will be delayed. Still, we don't want to have this behaviour as it delays the switch to the fully established mode, and invalid MPTCP options in this 3rd ACK will not be caught any more. This modification also affects the MPTCP + TFO feature as well, and being the reason why the selftests started to be unstable the last few days [4]. For TFO, the existing 'basic-cookie-not-reqd' test [5] was no longer passing: if the 3rd ACK contains data, and the connection is accept()ed before receiving them, these data would no longer be processed, and thus not ACKed. One last thing about MPTCP, in case of simultaneous connect(), a fallback to TCP will be done, which seems fine: `../common/defaults.sh` 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_MPTCP) = 3 +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation now in progress) +0 > S 0:0(0) +0 < S 0:0(0) win 1000 +0 > S. 0:0(0) ack 1 +0 < S. 0:0(0) ack 1 win 65535 +0 > . 1:1(0) ack 1 Simultaneous SYN-data crossing is also not supported by TFO, see [6]. Kuniyuki Iwashima suggested to restrict the processing to SYN+ACK only: that's a more generic solution than the one initially proposed, and also enough to fix the issues described above. 
Later on, Eric Dumazet mentioned that an ACK should still be sent in reaction to the second SYN+ACK that is received: not sending a DUPACK here seems wrong and could hurt: 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3 +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation now in progress) +0 > S 0:0(0) +0 < S 0:0(0) win 1000 +0 > S. 0:0(0) ack 1 +0 < S. 0:0(0) ack 1 win 1000 +0 > . 1:1(0) ack 1 // <== Here So in this version, the 'goto consume' is dropped, to always send an ACK when switching from TCP_SYN_RECV to TCP_ESTABLISHED. This ACK will be seen as a DUPACK -- with DSACK if SACK has been negotiated -- in case of simultaneous SYN crossing: that's what is expected here. Link: https://github.com/multipath-tcp/mptcp_net-next/actions/runs/9936227696 [1] Link: https://datatracker.ietf.org/doc/html/rfc8684#fig_tokens [2] Link: https://github.com/multipath-tcp/packetdrill/blob/mptcp-net-next/gtests/net/mptcp/syscalls/accept.pkt#L28 [3] Link: https://netdev.bots.linux.dev/contest.html?executor=vmksft-mptcp-dbg&test=mptcp-connect-sh [4] Link: https://github.com/google/packetdrill/blob/master/gtests/net/tcp/fastopen/server/basic-cookie-not-reqd.pkt#L21 [5] Link: https://github.com/google/packetdrill/blob/master/gtests/net/tcp/fastopen/client/simultaneous-fast-open.pkt [6] Fixes: 23e89e8ee7be ("tcp: Don't drop SYN+ACK for simultaneous connect().") Suggested-by: Paolo Abeni Suggested-by: Kuniyuki Iwashima Suggested-by: Eric Dumazet Signed-off-by: Matthieu Baerts (NGI0) Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20240724-upstream-net-next-20240716-tcp-3rd-ack-consume-sk_socket-v3-1-d48339764ce9@kernel.org Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_input.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e0d870d3c9b8..f130eccade39 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6825,9 +6825,6 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_fast_path_on(tp); if (sk->sk_shutdown & SEND_SHUTDOWN) tcp_shutdown(sk, SEND_SHUTDOWN); - - if (sk->sk_socket) - goto consume; break; case TCP_FIN_WAIT1: { From 8961b245e8f92bccbaacfbbdf69eba60e3e7c227 Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Wed, 3 Jul 2024 18:45:06 +0300 Subject: [PATCH 282/374] staging: iio: frequency: ad9834: Validate frequency parameter value commit b48aa991758999d4e8f9296c5bbe388f293ef465 upstream. In ad9834_write_frequency() clk_get_rate() can return 0. In such case ad9834_calc_freqreg() call will lead to division by zero. Checking 'if (fout > (clk_freq / 2))' doesn't protect in case of 'fout' is 0. ad9834_write_frequency() is called from ad9834_write(), where fout is taken from text buffer, which can contain any value. Modify parameters checking. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
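A minimal sketch of why the original bound check was insufficient; calc_freqreg() here is a simplified stand-in that only assumes the real register math divides by the MCLK rate:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in: the point is only that it divides by clk_freq. */
static uint32_t calc_freqreg(unsigned long clk_freq, unsigned long fout)
{
	return (uint32_t)(((uint64_t)fout << 28) / clk_freq);
}

static int write_frequency(unsigned long clk_freq, unsigned long fout)
{
	/* "fout > clk_freq / 2" alone lets clk_freq == 0 && fout == 0 through,
	 * and calc_freqreg() then divides by zero.  Reject a zero clock
	 * explicitly, as the fix does. */
	if (!clk_freq || fout > clk_freq / 2)
		return -22;                /* -EINVAL */

	printf("freqreg = %u\n", calc_freqreg(clk_freq, fout));
	return 0;
}

int main(void)
{
	write_frequency(0, 0);             /* rejected instead of crashing */
	write_frequency(75000000, 440);    /* normal case */
	return 0;
}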
Fixes: 12b9d5bf76bf ("Staging: IIO: DDS: AD9833 / AD9834 driver") Suggested-by: Dan Carpenter Signed-off-by: Aleksandr Mishin Reviewed-by: Dan Carpenter Link: https://patch.msgid.link/20240703154506.25584-1-amishin@t-argos.ru Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/staging/iio/frequency/ad9834.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c index a7a5cdcc6590..47e7d7e6d920 100644 --- a/drivers/staging/iio/frequency/ad9834.c +++ b/drivers/staging/iio/frequency/ad9834.c @@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st, clk_freq = clk_get_rate(st->mclk); - if (fout > (clk_freq / 2)) + if (!clk_freq || fout > (clk_freq / 2)) return -EINVAL; regval = ad9834_calc_freqreg(clk_freq, fout); From 3d752f729e35bc2e097bae78c362881db19a8367 Mon Sep 17 00:00:00 2001 From: David Lechner Date: Tue, 23 Jul 2024 11:32:21 -0500 Subject: [PATCH 283/374] iio: buffer-dmaengine: fix releasing dma channel on error commit 84c65d8008764a8fb4e627ff02de01ec4245f2c4 upstream. If dma_get_slave_caps() fails, we need to release the dma channel before returning an error to avoid leaking the channel. Fixes: 2d6ca60f3284 ("iio: Add a DMAengine framework based buffer") Signed-off-by: David Lechner Link: https://patch.msgid.link/20240723-iio-fix-dmaengine-free-on-error-v1-1-2c7cbc9b92ff@baylibre.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 918f6f8d65b6..0f50a99e8967 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -193,7 +193,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, ret = dma_get_slave_caps(chan, &caps); if (ret < 0) - goto err_free; + goto err_release; /* Needs to be aligned to the maximum of the minimums */ if (caps.src_addr_widths) @@ -219,6 +219,8 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, return &dmaengine_buffer->queue.buffer; +err_release: + dma_release_channel(chan); err_free: kfree(dmaengine_buffer); return ERR_PTR(ret); From b30d022f04a812f2065503aba90d721e31ac276a Mon Sep 17 00:00:00 2001 From: Matteo Martelli Date: Tue, 30 Jul 2024 10:11:53 +0200 Subject: [PATCH 284/374] iio: fix scale application in iio_convert_raw_to_processed_unlocked commit 8a3dcc970dc57b358c8db2702447bf0af4e0d83a upstream. When the scale_type is IIO_VAL_INT_PLUS_MICRO or IIO_VAL_INT_PLUS_NANO the scale passed as argument is only applied to the fractional part of the value. Fix it by also multiplying the integer part by the scale provided. 
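A small worked example of the formula this patch corrects, with made-up numbers assuming scale_val/scale_val2 encode 1.5 units per raw LSB and scale = 1000 converts to milli-units:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t raw = 100;
	int scale_val = 1, scale_val2 = 500000;  /* 1.5 (IIO_VAL_INT_PLUS_MICRO) */
	int scale = 1000;                        /* caller wants milli-units */

	/* Before the fix only the fractional half was multiplied by scale. */
	int64_t buggy = raw * scale_val
		      + (raw * (int64_t)scale_val2 * scale) / 1000000;
	/* Fixed: the integer part is scaled as well. */
	int64_t fixed = raw * scale_val * scale
		      + (raw * (int64_t)scale_val2 * scale) / 1000000;

	printf("buggy = %" PRId64 ", fixed = %" PRId64 "\n", buggy, fixed);
	/* buggy = 50100, fixed = 150000: 100 raw * 1.5 = 150 units = 150000 milli */
	return 0;
}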
Fixes: 48e44ce0f881 ("iio:inkern: Add function to read the processed value") Signed-off-by: Matteo Martelli Link: https://patch.msgid.link/20240730-iio-fix-scale-v1-1-6246638c8daa@gmail.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/inkern.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 39cf26d69d17..e1e1b31bf8b4 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -647,17 +647,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, break; case IIO_VAL_INT_PLUS_MICRO: if (scale_val2 < 0) - *processed = -raw64 * scale_val; + *processed = -raw64 * scale_val * scale; else - *processed = raw64 * scale_val; + *processed = raw64 * scale_val * scale; *processed += div_s64(raw64 * (s64)scale_val2 * scale, 1000000LL); break; case IIO_VAL_INT_PLUS_NANO: if (scale_val2 < 0) - *processed = -raw64 * scale_val; + *processed = -raw64 * scale_val * scale; else - *processed = raw64 * scale_val; + *processed = raw64 * scale_val * scale; *processed += div_s64(raw64 * (s64)scale_val2 * scale, 1000000000LL); break; From 9de24f9ef0f1dbe0b52b5a4e6f75ab43b743437e Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Wed, 14 Aug 2024 14:37:35 +0000 Subject: [PATCH 285/374] iio: imu: inv_mpu6050: fix interrupt status read for old buggy chips commit 0a3b517c8089aa4cf339f41460d542c681409386 upstream. Interrupt status read seems to be broken on some old MPU-6050 like chips. Fix by reverting to previous driver behavior bypassing interrupt status read. This is working because these chips are not supporting WoM and data ready is the only interrupt source. Fixes: 5537f653d9be ("iio: imu: inv_mpu6050: add new interrupt handler for WoM events") Cc: stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Tested-by: Svyatoslav Ryhel # LG P895 Tested-by: Andreas Westman Dorcsak # LG P880 Link: https://patch.msgid.link/20240814143735.327302-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 84273660ca2e..3bfeabab0ec4 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -248,12 +248,20 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p) int result; switch (st->chip_type) { + case INV_MPU6000: case INV_MPU6050: + case INV_MPU9150: + /* + * WoM is not supported and interrupt status read seems to be broken for + * some chips. Since data ready is the only interrupt, bypass interrupt + * status read and always assert data ready bit. 
+ */ + wom_bits = 0; + int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT; + goto data_ready_interrupt; case INV_MPU6500: case INV_MPU6515: case INV_MPU6880: - case INV_MPU6000: - case INV_MPU9150: case INV_MPU9250: case INV_MPU9255: wom_bits = INV_MPU6500_BIT_WOM_INT; @@ -279,6 +287,7 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p) } } +data_ready_interrupt: /* handle raw data interrupt */ if (int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT) { indio_dev->pollfunc->timestamp = st->it_timestamp; From 6c56053739ad499d0899b983c8e143c866dbbd92 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Wed, 31 Jul 2024 15:37:23 +0300 Subject: [PATCH 286/374] iio: adc: ad7124: fix config comparison commit 2f6b92d0f69f04d9e2ea0db1228ab7f82f3173af upstream. The ad7124_find_similar_live_cfg() computes the compare size by substracting the address of the cfg struct from the address of the live field. Because the live field is the first field in the struct, the result is 0. Also, the memcmp() call is made from the start of the cfg struct, which includes the live and cfg_slot fields, which are not relevant for the comparison. Fix by grouping the relevant fields with struct_group() and use the size of the group to compute the compare size; make the memcmp() call from the address of the group. Fixes: 7b8d045e497a ("iio: adc: ad7124: allow more than 8 channels") Signed-off-by: Dumitru Ceclan Reviewed-by: Nuno Sa Link: https://patch.msgid.link/20240731-ad7124-fix-v1-2-46a76aa4b9be@analog.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ad7124.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index e7b1d517d3de..9819cdac933c 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -147,15 +147,18 @@ struct ad7124_chip_info { struct ad7124_channel_config { bool live; unsigned int cfg_slot; - enum ad7124_ref_sel refsel; - bool bipolar; - bool buf_positive; - bool buf_negative; - unsigned int vref_mv; - unsigned int pga_bits; - unsigned int odr; - unsigned int odr_sel_bits; - unsigned int filter_type; + /* Following fields are used to compare equality. */ + struct_group(config_props, + enum ad7124_ref_sel refsel; + bool bipolar; + bool buf_positive; + bool buf_negative; + unsigned int vref_mv; + unsigned int pga_bits; + unsigned int odr; + unsigned int odr_sel_bits; + unsigned int filter_type; + ); }; struct ad7124_channel { @@ -334,11 +337,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ ptrdiff_t cmp_size; int i; - cmp_size = (u8 *)&cfg->live - (u8 *)cfg; + cmp_size = sizeof_field(struct ad7124_channel_config, config_props); for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; - if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size)) + if (cfg_aux->live && + !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) return cfg_aux; } From 0e79ed236b4d6d5ef595db6097e2ec10b7bde8d9 Mon Sep 17 00:00:00 2001 From: Guillaume Stols Date: Tue, 2 Jul 2024 12:52:51 +0000 Subject: [PATCH 287/374] iio: adc: ad7606: remove frstdata check for serial mode commit 90826e08468ba7fb35d8b39645b22d9e80004afe upstream. The current implementation attempts to recover from an eventual glitch in the clock by checking frstdata state after reading the first channel's sample: If frstdata is low, it will reset the chip and return -EIO. 
This will only work in parallel mode, where frstdata pin is set low after the 2nd sample read starts. For the serial mode, according to the datasheet, "The FRSTDATA output returns to a logic low following the 16th SCLK falling edge.", thus after the Xth pulse, X being the number of bits in a sample, the check will always be true, and the driver will not work at all in serial mode if frstdata(optional) is defined in the devicetree as it will reset the chip, and return -EIO every time read_sample is called. Hence, this check must be removed for serial mode. Fixes: b9618c0cacd7 ("staging: IIO: ADC: New driver for AD7606/AD7606-6/AD7606-4") Signed-off-by: Guillaume Stols Reviewed-by: Nuno Sa Link: https://patch.msgid.link/20240702-cleanup-ad7606-v3-1-18d5ea18770e@baylibre.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ad7606.c | 28 ++------------------- drivers/iio/adc/ad7606.h | 2 ++ drivers/iio/adc/ad7606_par.c | 48 +++++++++++++++++++++++++++++++++--- 3 files changed, 49 insertions(+), 29 deletions(-) diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index 1928d9ae5bcf..1c08c0921ee7 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = { 1, 2, 4, 8, 16, 32, 64, 128, }; -static int ad7606_reset(struct ad7606_state *st) +int ad7606_reset(struct ad7606_state *st) { if (st->gpio_reset) { gpiod_set_value(st->gpio_reset, 1); @@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st) return -ENODEV; } +EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606); static int ad7606_reg_access(struct iio_dev *indio_dev, unsigned int reg, @@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st) { unsigned int num = st->chip_info->num_channels - 1; u16 *data = st->data; - int ret; - - /* - * The frstdata signal is set to high while and after reading the sample - * of the first channel and low for all other channels. This can be used - * to check that the incoming data is correctly aligned. During normal - * operation the data should never become unaligned, but some glitch or - * electrostatic discharge might cause an extra read or clock cycle. - * Monitoring the frstdata signal allows to recover from such failure - * situations. 
- */ - - if (st->gpio_frstdata) { - ret = st->bops->read_block(st->dev, 1, data); - if (ret) - return ret; - - if (!gpiod_get_value(st->gpio_frstdata)) { - ad7606_reset(st); - return -EIO; - } - - data++; - num--; - } return st->bops->read_block(st->dev, num, data); } diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h index 0c6a88cc4695..6649e84d25de 100644 --- a/drivers/iio/adc/ad7606.h +++ b/drivers/iio/adc/ad7606.h @@ -151,6 +151,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, const char *name, unsigned int id, const struct ad7606_bus_ops *bops); +int ad7606_reset(struct ad7606_state *st); + enum ad7606_supported_device_ids { ID_AD7605_4, ID_AD7606_8, diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c index d8408052262e..6bc587b20f05 100644 --- a/drivers/iio/adc/ad7606_par.c +++ b/drivers/iio/adc/ad7606_par.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev, struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); - insw((unsigned long)st->base_address, buf, count); + /* + * On the parallel interface, the frstdata signal is set to high while + * and after reading the sample of the first channel and low for all + * other channels. This can be used to check that the incoming data is + * correctly aligned. During normal operation the data should never + * become unaligned, but some glitch or electrostatic discharge might + * cause an extra read or clock cycle. Monitoring the frstdata signal + * allows to recover from such failure situations. + */ + int num = count; + u16 *_buf = buf; + + if (st->gpio_frstdata) { + insw((unsigned long)st->base_address, _buf, 1); + if (!gpiod_get_value(st->gpio_frstdata)) { + ad7606_reset(st); + return -EIO; + } + _buf++; + num--; + } + insw((unsigned long)st->base_address, _buf, num); return 0; } @@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev, { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); - - insb((unsigned long)st->base_address, buf, count * 2); + /* + * On the parallel interface, the frstdata signal is set to high while + * and after reading the sample of the first channel and low for all + * other channels. This can be used to check that the incoming data is + * correctly aligned. During normal operation the data should never + * become unaligned, but some glitch or electrostatic discharge might + * cause an extra read or clock cycle. Monitoring the frstdata signal + * allows to recover from such failure situations. + */ + int num = count; + u16 *_buf = buf; + + if (st->gpio_frstdata) { + insb((unsigned long)st->base_address, _buf, 2); + if (!gpiod_get_value(st->gpio_frstdata)) { + ad7606_reset(st); + return -EIO; + } + _buf++; + num--; + } + insb((unsigned long)st->base_address, _buf, num * 2); return 0; } From 6e4bf8e79966f6e4cc7e21d9f8d1eb0fbe5b8b9b Mon Sep 17 00:00:00 2001 From: Nuno Sa Date: Tue, 6 Aug 2024 17:40:49 +0200 Subject: [PATCH 288/374] iio: adc: ad_sigma_delta: fix irq_flags on irq request commit e81bb580ec08d7503c14c92157d810d306290003 upstream. With commit 7b0c9f8fa3d2 ("iio: adc: ad_sigma_delta: Add optional irq selection"), we can get the irq line from struct ad_sigma_delta_info instead of the spi device. 
However, in devm_ad_sd_probe_trigger(), when getting the irq_flags with irq_get_trigger_type() we are still using the spi device irq instead of the one used for devm_request_irq(). Fixes: 7b0c9f8fa3d2 ("iio: adc: ad_sigma_delta: Add optional irq selection") Signed-off-by: Nuno Sa Link: https://patch.msgid.link/20240806-dev-fix-ad-sigma-delta-v1-1-aa25b173c063@analog.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ad_sigma_delta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index a2b87f6b7a07..6231c635803d 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -568,7 +568,7 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_validate_trigger, IIO_AD_SIGMA_DELTA); static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev) { struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); - unsigned long irq_flags = irq_get_trigger_type(sigma_delta->spi->irq); + unsigned long irq_flags = irq_get_trigger_type(sigma_delta->irq_line); int ret; if (dev != &sigma_delta->spi->dev) { From 76a160c8e82a543fb766d864aef4bf3314141ef5 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Wed, 31 Jul 2024 15:37:22 +0300 Subject: [PATCH 289/374] iio: adc: ad7124: fix chip ID mismatch commit 96f9ab0d5933c1c00142dd052f259fce0bc3ced2 upstream. The ad7124_soft_reset() function has the assumption that the chip will assert the "power-on reset" bit in the STATUS register after a software reset without any delay. The POR bit =0 is used to check if the chip initialization is done. A chip ID mismatch probe error appears intermittently when the probe continues too soon and the ID register does not contain the expected value. Fix by adding a 200us delay after the software reset command is issued. Fixes: b3af341bbd96 ("iio: adc: Add ad7124 support") Signed-off-by: Dumitru Ceclan Reviewed-by: Nuno Sa Link: https://patch.msgid.link/20240731-ad7124-fix-v1-1-46a76aa4b9be@analog.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ad7124.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9819cdac933c..bd323c6bd756 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -766,6 +766,7 @@ static int ad7124_soft_reset(struct ad7124_state *st) if (ret < 0) return ret; + fsleep(200); timeout = 100; do { ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval); From a334f875a8862930034e67099b46836076c3ba3d Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Tue, 6 Aug 2024 11:51:33 +0300 Subject: [PATCH 290/374] iio: adc: ad7124: fix DT configuration parsing commit 61cbfb5368dd50ed0d65ce21d305aa923581db2b upstream. The cfg pointer is set before reading the channel number that the configuration should point to. This causes configurations to be shifted by one channel. For example setting bipolar to the first channel defined in the DT will cause bipolar mode to be active on the second defined channel. Fix by moving the cfg pointer setting after reading the channel number. 
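The ordering bug generalises to any loop that indexes state by a value read inside the loop body; a minimal sketch with hypothetical names (read_reg_property(), configs[]) rather than the ad7124 structures:

#include <stdio.h>

#define NUM_CH 3

struct cfg { int bipolar; };

/* Hypothetical: pretend the DT children report channels 2, 0, 1. */
static int read_reg_property(int child) { return (child + 2) % NUM_CH; }

int main(void)
{
	struct cfg configs[NUM_CH] = { 0 };
	int channel = 0;

	for (int child = 0; child < NUM_CH; child++) {
		/* The bug would be taking "struct cfg *c = &configs[channel];"
		 * here, before 'channel' is read for this child: it still holds
		 * the previous child's index, shifting every property by one. */
		channel = read_reg_property(child);
		struct cfg *c = &configs[channel];   /* fixed: index after the read */
		c->bipolar = 1;
	}

	for (int i = 0; i < NUM_CH; i++)
		printf("ch%d bipolar=%d\n", i, configs[i].bipolar);
	return 0;
}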
Fixes: 7b8d045e497a ("iio: adc: ad7124: allow more than 8 channels") Signed-off-by: Dumitru Ceclan Reviewed-by: Nuno Sa Link: https://patch.msgid.link/20240806085133.114547-1-dumitru.ceclan@analog.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ad7124.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index bd323c6bd756..d2fe0269b6d3 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -842,8 +842,6 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev, st->channels = channels; device_for_each_child_node_scoped(dev, child) { - cfg = &st->channels[channel].cfg; - ret = fwnode_property_read_u32(child, "reg", &channel); if (ret) return ret; @@ -861,6 +859,7 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev, st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) | AD7124_CHANNEL_AINM(ain[1]); + cfg = &st->channels[channel].cfg; cfg->bipolar = fwnode_property_read_bool(child, "bipolar"); ret = fwnode_property_read_u32(child, "adi,reference-select", &tmp); From 0140b2b5b76a4aaa58ecee31a4c83583ef86a173 Mon Sep 17 00:00:00 2001 From: Faisal Hassan Date: Thu, 29 Aug 2024 15:15:02 +0530 Subject: [PATCH 291/374] usb: dwc3: core: update LC timer as per USB Spec V3.2 commit 9149c9b0c7e046273141e41eebd8a517416144ac upstream. This fix addresses STAR 9001285599, which only affects DWC_usb3 version 3.20a. The timer value for PM_LC_TIMER in DWC_usb3 3.20a for the Link ECN changes is incorrect. If the PM TIMER ECN is enabled via GUCTL2[19], the link compliance test (TD7.21) may fail. If the ECN is not enabled (GUCTL2[19] = 0), the controller will use the old timer value (5us), which is still acceptable for the link compliance test. Therefore, clear GUCTL2[19] to pass the USB link compliance test: TD 7.21. Cc: stable@vger.kernel.org Signed-off-by: Faisal Hassan Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20240829094502.26502-1-quic_faisalh@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/core.c | 15 +++++++++++++++ drivers/usb/dwc3/core.h | 2 ++ 2 files changed, 17 insertions(+) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 31df6fdc233e..ee95d3909430 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1367,6 +1367,21 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); } + /* + * STAR 9001285599: This issue affects DWC_usb3 version 3.20a + * only. If the PM TIMER ECM is enabled through GUCTL2[19], the + * link compliance test (TD7.21) may fail. If the ECN is not + * enabled (GUCTL2[19] = 0), the controller will use the old timer + * value (5us), which is still acceptable for the link compliance + * test. Therefore, do not enable PM TIMER ECM in 3.20a by + * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0. + */ + if (DWC3_VER_IS(DWC3, 320A)) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); + reg &= ~DWC3_GUCTL2_LC_TIMER; + dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); + } + /* * When configured in HOST mode, after issuing U3/L2 exit controller * fails to send proper CRC checksum in CRC5 feild. 
Because of this diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 3781c736c1a1..ed7f999e05bb 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -417,6 +417,7 @@ /* Global User Control Register 2 */ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) +#define DWC3_GUCTL2_LC_TIMER BIT(19) /* Global User Control Register 3 */ #define DWC3_GUCTL3_SPLITDISABLE BIT(14) @@ -1262,6 +1263,7 @@ struct dwc3 { #define DWC3_REVISION_290A 0x5533290a #define DWC3_REVISION_300A 0x5533300a #define DWC3_REVISION_310A 0x5533310a +#define DWC3_REVISION_320A 0x5533320a #define DWC3_REVISION_330A 0x5533330a #define DWC31_REVISION_ANY 0x0 From 2ba5ad41218a09fdffd2d733805ed7104924ef82 Mon Sep 17 00:00:00 2001 From: Pawel Laszczak Date: Mon, 2 Sep 2024 11:09:16 +0000 Subject: [PATCH 292/374] usb: cdns2: Fix controller reset issue commit e2940928115e83d707b21bf00b0db7d6c15f8341 upstream. Patch fixes the procedure of resetting controller. The CPUCTRL register is write only and reading returns 0. Waiting for reset to complite is incorrect. Fixes: 3eb1f1efe204 ("usb: cdns2: Add main part of Cadence USBHS driver") cc: stable@vger.kernel.org Signed-off-by: Pawel Laszczak Link: https://lore.kernel.org/r/PH7PR07MB9538D56D75F1F399D0BB96F0DD922@PH7PR07MB9538.namprd07.prod.outlook.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/cdns2/cdns2-gadget.c | 12 +++--------- drivers/usb/gadget/udc/cdns2/cdns2-gadget.h | 9 +++++++++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c index 0eed0e03842c..d394affb7072 100644 --- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c +++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c @@ -2251,7 +2251,6 @@ static int cdns2_gadget_start(struct cdns2_device *pdev) { u32 max_speed; void *buf; - int val; int ret; pdev->usb_regs = pdev->regs; @@ -2261,14 +2260,9 @@ static int cdns2_gadget_start(struct cdns2_device *pdev) pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET; /* Reset controller. */ - set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST); - - ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val, - !(val & CPUCTRL_SW_RST), 1, 10000); - if (ret) { - dev_err(pdev->dev, "Error: reset controller timeout\n"); - return -EINVAL; - } + writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN, + &pdev->usb_regs->cpuctrl); + usleep_range(5, 10); usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL); diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h index 71e2f62d653a..b5d5ec12e986 100644 --- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h +++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h @@ -292,8 +292,17 @@ struct cdns2_usb_regs { #define SPEEDCTRL_HSDISABLE BIT(7) /* CPUCTRL- bitmasks. */ +/* UP clock enable */ +#define CPUCTRL_UPCLK BIT(0) /* Controller reset bit. */ #define CPUCTRL_SW_RST BIT(1) +/** + * If the wuen bit is ‘1’, the upclken is automatically set to ‘1’ after + * detecting rising edge of wuintereq interrupt. If the wuen bit is ‘0’, + * the wuintereq interrupt is ignored. + */ +#define CPUCTRL_WUEN BIT(7) + /** * struct cdns2_adma_regs - ADMA controller registers. From c1e23cee9ce377a0846259ee19f13479fcd42927 Mon Sep 17 00:00:00 2001 From: Prashanth K Date: Wed, 28 Aug 2024 12:13:02 +0530 Subject: [PATCH 293/374] usb: dwc3: Avoid waking up gadget during startxfer commit 00dcf2fa449f23a263343d7fe051741bdde65d0b upstream. 
When operating in High-Speed, it is observed that DSTS[USBLNKST] doesn't update link state immediately after receiving the wakeup interrupt. Since wakeup event handler calls the resume callbacks, there is a chance that function drivers can perform an ep queue, which in turn tries to perform remote wakeup from send_gadget_ep_cmd(STARTXFER). This happens because DSTS[[21:18] wasn't updated to U0 yet, it's observed that the latency of DSTS can be in order of milli-seconds. Hence avoid calling gadget_wakeup during startxfer to prevent unnecessarily issuing remote wakeup to host. Fixes: c36d8e947a56 ("usb: dwc3: gadget: put link to U0 before Start Transfer") Cc: stable@vger.kernel.org Suggested-by: Thinh Nguyen Signed-off-by: Prashanth K Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20240828064302.3796315-1-quic_prashk@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/gadget.c | 41 ++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 89fc690fdf34..291bc549935b 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -287,6 +287,23 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async); * * Caller should handle locking. This function will issue @cmd with given * @params to @dep and wait for its completion. + * + * According to the programming guide, if the link state is in L1/L2/U3, + * then sending the Start Transfer command may not complete. The + * programming guide suggested to bring the link state back to ON/U0 by + * performing remote wakeup prior to sending the command. However, don't + * initiate remote wakeup when the user/function does not send wakeup + * request via wakeup ops. Send the command when it's allowed. + * + * Notes: + * For L1 link state, issuing a command requires the clearing of + * GUSB2PHYCFG.SUSPENDUSB2, which turns on the signal required to complete + * the given command (usually within 50us). This should happen within the + * command timeout set by driver. No additional step is needed. + * + * For L2 or U3 link state, the gadget is in USB suspend. Care should be + * taken when sending Start Transfer command to ensure that it's done after + * USB resume. */ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, struct dwc3_gadget_ep_cmd_params *params) @@ -327,30 +344,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } - if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { - int link_state; - - /* - * Initiate remote wakeup if the link state is in U3 when - * operating in SS/SSP or L1/L2 when operating in HS/FS. If the - * link state is in U1/U2, no remote wakeup is needed. The Start - * Transfer command will initiate the link recovery. - */ - link_state = dwc3_gadget_get_link_state(dwc); - switch (link_state) { - case DWC3_LINK_STATE_U2: - if (dwc->gadget->speed >= USB_SPEED_SUPER) - break; - - fallthrough; - case DWC3_LINK_STATE_U3: - ret = __dwc3_gadget_wakeup(dwc, false); - dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", - ret); - break; - } - } - /* * For some commands such as Update Transfer command, DEPCMDPARn * registers are reserved. 
Since the driver often sends Update Transfer From 31b793b08134634d9e8669378faf14293dc7188c Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 30 Aug 2024 14:16:45 +0300 Subject: [PATCH 294/374] usb: typec: ucsi: Fix the partner PD revision commit de3d7969f6a80aa5abbbc0f39897495ae35548d0 upstream. The Partner PD Revision field in GET_CONNECTOR_CAPABILITY data structure was introduced in UCSI v2.1. In ucsi_check_connector_capability() the version was assumed to be 2.0, and in ucsi_register_partner() the field is accessed completely unconditionally. Fixing the version in ucsi_check_connector_capability(), and replacing the unconditional pd_revision assignment with a direct call to ucsi_check_connector_capability() in ucsi_register_port(). After this the revision is also checked only if there is a PD contract. Fixes: b9fccfdb4ebb ("usb: typec: ucsi: Get PD revision for partner") Cc: stable@vger.kernel.org Signed-off-by: Heikki Krogerus Link: https://lore.kernel.org/r/20240830111645.2134301-1-heikki.krogerus@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/ucsi/ucsi.c | 50 ++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 2cc7aedd490f..45e91d065b3b 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -961,6 +961,27 @@ static void ucsi_unregister_cable(struct ucsi_connector *con) con->cable = NULL; } +static int ucsi_check_connector_capability(struct ucsi_connector *con) +{ + u64 command; + int ret; + + if (!con->partner || con->ucsi->version < UCSI_VERSION_2_1) + return 0; + + command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num); + ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap)); + if (ret < 0) { + dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret); + return ret; + } + + typec_partner_set_pd_revision(con->partner, + UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags)); + + return ret; +} + static void ucsi_pwr_opmode_change(struct ucsi_connector *con) { switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) { @@ -970,6 +991,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con) ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0); ucsi_partner_task(con, ucsi_check_altmodes, 30, 0); ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ); + ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ); break; case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5: con->rdo = 0; @@ -1013,7 +1035,6 @@ static int ucsi_register_partner(struct ucsi_connector *con) desc.identity = &con->partner_identity; desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD; - desc.pd_revision = UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags); partner = typec_register_partner(con->port, &desc); if (IS_ERR(partner)) { @@ -1090,27 +1111,6 @@ static void ucsi_partner_change(struct ucsi_connector *con) con->num, u_role); } -static int ucsi_check_connector_capability(struct ucsi_connector *con) -{ - u64 command; - int ret; - - if (!con->partner || con->ucsi->version < UCSI_VERSION_2_0) - return 0; - - command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num); - ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap)); - if (ret < 0) { - dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret); - return ret; - } - - typec_partner_set_pd_revision(con->partner, - UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags)); - - 
return ret; -} - static int ucsi_check_connection(struct ucsi_connector *con) { u8 prev_flags = con->status.flags; @@ -1225,15 +1225,16 @@ static void ucsi_handle_connector_change(struct work_struct *work) if (con->status.flags & UCSI_CONSTAT_CONNECTED) { ucsi_register_partner(con); ucsi_partner_task(con, ucsi_check_connection, 1, HZ); - ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ); if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE) ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ); if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS) ucsi_partner_task(con, ucsi_check_cable, 1, HZ); if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == - UCSI_CONSTAT_PWR_OPMODE_PD) + UCSI_CONSTAT_PWR_OPMODE_PD) { ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ); + ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ); + } } else { ucsi_unregister_partner(con); } @@ -1650,6 +1651,7 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con) ucsi_register_device_pdos(con); ucsi_get_src_pdos(con); ucsi_check_altmodes(con); + ucsi_check_connector_capability(con); } trace_ucsi_register_port(con->num, &con->status); From bfc1704d909dc9911a558b1a5833d3d61a43a1f2 Mon Sep 17 00:00:00 2001 From: Sukrut Bellary Date: Mon, 2 Sep 2024 15:14:09 +0100 Subject: [PATCH 295/374] misc: fastrpc: Fix double free of 'buf' in error path commit e8c276d4dc0e19ee48385f74426aebc855b49aaf upstream. smatch warning: drivers/misc/fastrpc.c:1926 fastrpc_req_mmap() error: double free of 'buf' In fastrpc_req_mmap() error path, the fastrpc buffer is freed in fastrpc_req_munmap_impl() if unmap is successful. But in the end, there is an unconditional call to fastrpc_buf_free(). So the above case triggers the double free of fastrpc buf. Fixes: 72fa6f7820c4 ("misc: fastrpc: Rework fastrpc_req_munmap") Reviewed-by: Shuah Khan Reviewed-by: Dan Carpenter Reviewed-by: Srinivas Kandagatla Signed-off-by: Sukrut Bellary Signed-off-by: Srinivas Kandagatla Cc: stable Link: https://lore.kernel.org/r/20240902141409.70371-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 5680856c0fb8..f525971601e4 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1912,7 +1912,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) &args[0]); if (err) { dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); - goto err_invoke; + fastrpc_buf_free(buf); + return err; } /* update the buffer to be able to deallocate the memory on the DSP */ @@ -1950,8 +1951,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) err_assign: fastrpc_req_munmap_impl(fl, buf); -err_invoke: - fastrpc_buf_free(buf); return err; } From 109e845c1184c9f786d41516348ba3efd9112792 Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Thu, 22 Aug 2024 18:23:52 +0000 Subject: [PATCH 296/374] binder: fix UAF caused by offsets overwrite commit 4df153652cc46545722879415937582028c18af5 upstream. Binder objects are processed and copied individually into the target buffer during transactions. Any raw data in-between these objects is copied as well. However, this raw data copy lacks an out-of-bounds check. If the raw data exceeds the data section size then the copy overwrites the offsets section. This eventually triggers an error that attempts to unwind the processed objects. 
However, at this point the offsets used to index these objects are now corrupted. Unwinding with corrupted offsets can result in decrements of arbitrary nodes and lead to their premature release. Other users of such nodes are left with a dangling pointer triggering a use-after-free. This issue is made evident by the following KASAN report (trimmed): ================================================================== BUG: KASAN: slab-use-after-free in _raw_spin_lock+0xe4/0x19c Write of size 4 at addr ffff47fc91598f04 by task binder-util/743 CPU: 9 UID: 0 PID: 743 Comm: binder-util Not tainted 6.11.0-rc4 #1 Hardware name: linux,dummy-virt (DT) Call trace: _raw_spin_lock+0xe4/0x19c binder_free_buf+0x128/0x434 binder_thread_write+0x8a4/0x3260 binder_ioctl+0x18f0/0x258c [...] Allocated by task 743: __kmalloc_cache_noprof+0x110/0x270 binder_new_node+0x50/0x700 binder_transaction+0x413c/0x6da8 binder_thread_write+0x978/0x3260 binder_ioctl+0x18f0/0x258c [...] Freed by task 745: kfree+0xbc/0x208 binder_thread_read+0x1c5c/0x37d4 binder_ioctl+0x16d8/0x258c [...] ================================================================== To avoid this issue, let's check that the raw data copy is within the boundaries of the data section. Fixes: 6d98eb95b450 ("binder: avoid potential data leakage when copying txn") Cc: Todd Kjos Cc: stable@vger.kernel.org Signed-off-by: Carlos Llamas Link: https://lore.kernel.org/r/20240822182353.2129600-1-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 2d0a24a56508..2ca71b1d3678 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3342,6 +3342,7 @@ static void binder_transaction(struct binder_proc *proc, */ copy_size = object_offset - user_offset; if (copy_size && (user_offset > object_offset || + object_offset > tr->data_size || binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, user_offset, From 540ca4c902d970fe23350c29bc2001fe4ca9efac Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 2 Sep 2024 15:25:10 +0100 Subject: [PATCH 297/374] dt-bindings: nvmem: Use soc-nvmem node name instead of nvmem commit a759d1f25182f51210c8831d71ce7ee81e0930f4 upstream. Based on commit d8764d347bd7 ("dt-bindings: firmware: xilinx: Describe soc-nvmem subnode") soc-nvmem should be used instead of simple nvmem that's why also update example to have it described correctly everywhere. 
Fixes: c7f99cd8fb6b ("dt-bindings: nvmem: Convert xlnx,zynqmp-nvmem.txt to yaml") Cc: stable Signed-off-by: Michal Simek Acked-by: Rob Herring (Arm) Signed-off-by: Srinivas Kandagatla Acked-by: Conor Dooley Link: https://lore.kernel.org/r/20240902142510.71096-4-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml index 917c40d5c382..1cbe44ab23b1 100644 --- a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml +++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml @@ -28,7 +28,7 @@ unevaluatedProperties: false examples: - | - nvmem { + soc-nvmem { compatible = "xlnx,zynqmp-nvmem-fw"; nvmem-layout { compatible = "fixed-layout"; From f78addda7afc2b15617b710ad2fa106be68f0609 Mon Sep 17 00:00:00 2001 From: John Thomson Date: Mon, 2 Sep 2024 15:25:08 +0100 Subject: [PATCH 298/374] nvmem: u-boot-env: error if NVMEM device is too small MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8679e8b4a1ebdb40c4429e49368d29353e07b601 upstream. Verify data size before trying to parse it to avoid reading out of buffer. This could happen in case of problems at MTD level or invalid DT bindings. Signed-off-by: John Thomson Cc: stable Fixes: d5542923f200 ("nvmem: add driver handling U-Boot environment variables") [rmilecki: simplify commit description & rebase] Signed-off-by: Rafał Miłecki Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20240902142510.71096-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/u-boot-env.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c index befbab156cda..adabbfdad6fb 100644 --- a/drivers/nvmem/u-boot-env.c +++ b/drivers/nvmem/u-boot-env.c @@ -176,6 +176,13 @@ static int u_boot_env_parse(struct u_boot_env *priv) data_offset = offsetof(struct u_boot_env_image_broadcom, data); break; } + + if (dev_size < data_offset) { + dev_err(dev, "Device too small for u-boot-env\n"); + err = -EIO; + goto err_kfree; + } + crc32_addr = (__le32 *)(buf + crc32_offset); crc32 = le32_to_cpu(*crc32_addr); crc32_data_len = dev_size - crc32_data_offset; From 2241b78f22450d55fed8aa7b9e266de3b50ad024 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 2 Sep 2024 15:25:09 +0100 Subject: [PATCH 299/374] nvmem: Fix return type of devm_nvmem_device_get() in kerneldoc commit c69f37f6559a8948d70badd2b179db7714dedd62 upstream. devm_nvmem_device_get() returns an nvmem device, not an nvmem cell. 
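For context, a minimal hypothetical consumer of this API looks as follows; the "eeprom" id is only an example, and the point is that the returned object is a struct nvmem_device that is released automatically together with @dev:

	struct nvmem_device *nvmem;

	nvmem = devm_nvmem_device_get(dev, "eeprom");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);
	/* read/write through nvmem_device_read()/nvmem_device_write() */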
Fixes: e2a5402ec7c6d044 ("nvmem: Add nvmem_device based consumer apis.") Cc: stable Signed-off-by: Geert Uytterhoeven Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20240902142510.71096-3-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index f8dd7eb40fbe..cccfa9fe8119 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1258,13 +1258,13 @@ void nvmem_device_put(struct nvmem_device *nvmem) EXPORT_SYMBOL_GPL(nvmem_device_put); /** - * devm_nvmem_device_get() - Get nvmem cell of device form a given id + * devm_nvmem_device_get() - Get nvmem device of device form a given id * * @dev: Device that requests the nvmem device. * @id: name id for the requested nvmem device. * - * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell - * on success. The nvmem_cell will be freed by the automatically once the + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. The nvmem_device will be freed by the automatically once the * device is freed. */ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) From 928e399e84f4e80307dce44e89415115c473275b Mon Sep 17 00:00:00 2001 From: Saurabh Sengar Date: Thu, 29 Aug 2024 12:43:11 +0530 Subject: [PATCH 300/374] uio_hv_generic: Fix kernel NULL pointer dereference in hv_uio_rescind commit fb1adbd7e50f3d2de56d0a2bb0700e2e819a329e upstream. For primary VM Bus channels, primary_channel pointer is always NULL. This pointer is valid only for the secondary channels. Also, rescind callback is meant for primary channels only. Fix NULL pointer dereference by retrieving the device_obj from the parent for the primary channel. Cc: stable@vger.kernel.org Fixes: ca3cda6fcf1e ("uio_hv_generic: add rescind support") Signed-off-by: Saurabh Sengar Signed-off-by: Naman Jain Link: https://lore.kernel.org/r/20240829071312.1595-2-namjain@linux.microsoft.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_hv_generic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index b45653752301..e3e66a3e85a8 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -106,10 +106,11 @@ static void hv_uio_channel_cb(void *context) /* * Callback from vmbus_event when channel is rescinded. + * It is meant for rescind of primary channels only. */ static void hv_uio_rescind(struct vmbus_channel *channel) { - struct hv_device *hv_dev = channel->primary_channel->device_obj; + struct hv_device *hv_dev = channel->device_obj; struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); /* From a11874db04eaba303befb4e34533f2939a82ffa3 Mon Sep 17 00:00:00 2001 From: Naman Jain Date: Thu, 29 Aug 2024 12:43:12 +0530 Subject: [PATCH 301/374] Drivers: hv: vmbus: Fix rescind handling in uio_hv_generic commit 6fd28941447bf2c8ca0f26fda612a1cabc41663f upstream. Rescind offer handling relies on rescind callbacks for some of the resources cleanup, if they are registered. It does not unregister vmbus device for the primary channel closure, when callback is registered. Without it, next onoffer does not come, rescind flag remains set and device goes to unusable state. Add logic to unregister vmbus for the primary channel in rescind callback to ensure channel removal and relid release, and to ensure that next onoffer can be received and handled properly. 
Cc: stable@vger.kernel.org Fixes: ca3cda6fcf1e ("uio_hv_generic: add rescind support") Signed-off-by: Naman Jain Reviewed-by: Saurabh Sengar Link: https://lore.kernel.org/r/20240829071312.1595-3-namjain@linux.microsoft.com Signed-off-by: Greg Kroah-Hartman --- drivers/hv/vmbus_drv.c | 1 + drivers/uio/uio_hv_generic.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 12a707ab73f8..4d8378b677a6 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1952,6 +1952,7 @@ void vmbus_device_unregister(struct hv_device *device_obj) */ device_unregister(&device_obj->device); } +EXPORT_SYMBOL_GPL(vmbus_device_unregister); #ifdef CONFIG_ACPI /* diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index e3e66a3e85a8..870409599411 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -121,6 +121,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel) /* Wake up reader */ uio_event_notify(&pdata->info); + + /* + * With rescind callback registered, rescind path will not unregister the device + * from vmbus when the primary channel is rescinded. + * Without it, rescind handling is incomplete and next onoffer msg does not come. + * Unregister the device from vmbus here. + */ + vmbus_device_unregister(channel->device_obj); } /* Sysfs API to allow mmap of the ring buffers From 00fe5292f081f8d773e572df8e03bf6e1855fe49 Mon Sep 17 00:00:00 2001 From: David Fernandez Gonzalez Date: Wed, 28 Aug 2024 15:43:37 +0000 Subject: [PATCH 302/374] VMCI: Fix use-after-free when removing resource in vmci_resource_remove() commit 48b9a8dabcc3cf5f961b2ebcd8933bf9204babb7 upstream. When removing a resource from vmci_resource_table in vmci_resource_remove(), the search is performed using the resource handle by comparing context and resource fields. It is possible though to create two resources with different types but same handle (same context and resource fields). When trying to remove one of the resources, vmci_resource_remove() may not remove the intended one, but the object will still be freed as in the case of the datagram type in vmci_datagram_destroy_handle(). vmci_resource_table will still hold a pointer to this freed resource leading to a use-after-free vulnerability. 
BUG: KASAN: use-after-free in vmci_handle_is_equal include/linux/vmw_vmci_defs.h:142 [inline] BUG: KASAN: use-after-free in vmci_resource_remove+0x3a1/0x410 drivers/misc/vmw_vmci/vmci_resource.c:147 Read of size 4 at addr ffff88801c16d800 by task syz-executor197/1592 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.0+0x21/0x366 mm/kasan/report.c:239 __kasan_report.cold+0x7f/0x132 mm/kasan/report.c:425 kasan_report+0x38/0x51 mm/kasan/report.c:442 vmci_handle_is_equal include/linux/vmw_vmci_defs.h:142 [inline] vmci_resource_remove+0x3a1/0x410 drivers/misc/vmw_vmci/vmci_resource.c:147 vmci_qp_broker_detach+0x89a/0x11b9 drivers/misc/vmw_vmci/vmci_queue_pair.c:2182 ctx_free_ctx+0x473/0xbe1 drivers/misc/vmw_vmci/vmci_context.c:444 kref_put include/linux/kref.h:65 [inline] vmci_ctx_put drivers/misc/vmw_vmci/vmci_context.c:497 [inline] vmci_ctx_destroy+0x170/0x1d6 drivers/misc/vmw_vmci/vmci_context.c:195 vmci_host_close+0x125/0x1ac drivers/misc/vmw_vmci/vmci_host.c:143 __fput+0x261/0xa34 fs/file_table.c:282 task_work_run+0xf0/0x194 kernel/task_work.c:164 tracehook_notify_resume include/linux/tracehook.h:189 [inline] exit_to_user_mode_loop+0x184/0x189 kernel/entry/common.c:187 exit_to_user_mode_prepare+0x11b/0x123 kernel/entry/common.c:220 __syscall_exit_to_user_mode_work kernel/entry/common.c:302 [inline] syscall_exit_to_user_mode+0x18/0x42 kernel/entry/common.c:313 do_syscall_64+0x41/0x85 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x6e/0x0 This change ensures the type is also checked when removing the resource from vmci_resource_table in vmci_resource_remove(). Fixes: bc63dedb7d46 ("VMCI: resource object implementation.") Cc: stable@vger.kernel.org Reported-by: George Kennedy Signed-off-by: David Fernandez Gonzalez Link: https://lore.kernel.org/r/20240828154338.754746-1-david.fernandez.gonzalez@oracle.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_resource.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 692daa9eff34..19c9d2cdd277 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource) spin_lock(&vmci_resource_table.lock); hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { - if (vmci_handle_is_equal(r->handle, resource->handle)) { + if (vmci_handle_is_equal(r->handle, resource->handle) && + resource->type == r->type) { hlist_del_init_rcu(&r->node); break; } From 13145027070c86aee74e73a029afe8394773a0fc Mon Sep 17 00:00:00 2001 From: Jacky Bai Date: Thu, 25 Jul 2024 15:33:54 -0400 Subject: [PATCH 303/374] clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX commit 5b8843fcd49827813da80c0f590a17ae4ce93c5d upstream. In tpm_set_next_event(delta), return -ETIME by wrong cast to int when delta is larger than INT_MAX. For example: tpm_set_next_event(delta = 0xffff_fffe) { ... next = tpm_read_counter(); // assume next is 0x10 next += delta; // next will 0xffff_fffe + 0x10 = 0x1_0000_000e now = tpm_read_counter(); // now is 0x10 ... return (int)(next - now) <= 0 ? -ETIME : 0; ^^^^^^^^^^ 0x1_0000_000e - 0x10 = 0xffff_fffe, which is -2 when cast to int. So return -ETIME. } To fix this, introduce a 'prev' variable and check if 'now - prev' is larger than delta. 
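The arithmetic can be reproduced outside the kernel; the stand-alone snippet below (not driver code, and assuming a 64-bit unsigned long as in the example above) shows why the old signed cast reports -ETIME for a large delta while the new unsigned comparison does not:

	#include <stdio.h>

	int main(void)
	{
		unsigned long prev = 0x10, delta = 0xfffffffeUL;
		unsigned long next = prev + delta;	/* value written to TPM_C0V */
		unsigned long now = prev;		/* counter has not advanced yet */

		/* old check: (int)(next - now) <= 0 -> prints -2, spurious -ETIME */
		printf("old: %d\n", (int)(next - now));
		/* new check: (now - prev) >= delta -> prints 0, no -ETIME */
		printf("new: %d\n", (now - prev) >= delta);
		return 0;
	}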
Cc: stable@vger.kernel.org Fixes: 059ab7b82eec ("clocksource/drivers/imx-tpm: Add imx tpm timer support") Signed-off-by: Jacky Bai Reviewed-by: Peng Fan Reviewed-by: Ye Li Reviewed-by: Jason Liu Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20240725193355.1436005-1-Frank.Li@nxp.com Signed-off-by: Daniel Lezcano Signed-off-by: Greg Kroah-Hartman --- drivers/clocksource/timer-imx-tpm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index bd64a8a8427f..cd23caf1e599 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -83,10 +83,10 @@ static u64 notrace tpm_read_sched_clock(void) static int tpm_set_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long next, now; + unsigned long next, prev, now; - next = tpm_read_counter(); - next += delta; + prev = tpm_read_counter(); + next = prev + delta; writel(next, timer_base + TPM_C0V); now = tpm_read_counter(); @@ -96,7 +96,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)(next - now) <= 0 ? -ETIME : 0; + return (now - prev) >= delta ? -ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) From 9c09812e885ac93555540774b679aa8a912df890 Mon Sep 17 00:00:00 2001 From: Jacky Bai Date: Thu, 25 Jul 2024 15:33:55 -0400 Subject: [PATCH 304/374] clocksource/drivers/imx-tpm: Fix next event not taking effect sometime commit 3d5c2f8e75a55cfb11a85086c71996af0354a1fb upstream. The value written into the TPM CnV can only be updated into the hardware when the counter increases. Additional writes to the CnV write buffer are ignored until the register has been updated. Therefore, we need to check if the CnV has been updated before continuing. This may require waiting for 1 counter cycle in the worst case. Cc: stable@vger.kernel.org Fixes: 059ab7b82eec ("clocksource/drivers/imx-tpm: Add imx tpm timer support") Signed-off-by: Jacky Bai Reviewed-by: Peng Fan Reviewed-by: Ye Li Reviewed-by: Jason Liu Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20240725193355.1436005-2-Frank.Li@nxp.com Signed-off-by: Daniel Lezcano Signed-off-by: Greg Kroah-Hartman --- drivers/clocksource/timer-imx-tpm.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index cd23caf1e599..92c025b70eb6 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -90,6 +90,14 @@ static int tpm_set_next_event(unsigned long delta, writel(next, timer_base + TPM_C0V); now = tpm_read_counter(); + /* + * Need to wait CNT increase at least 1 cycle to make sure + * the C0V has been updated into HW. + */ + if ((next & 0xffffffff) != readl(timer_base + TPM_C0V)) + while (now == tpm_read_counter()) + ; + /* * NOTE: We observed in a very small probability, the bus fabric * contention between GPU and A7 may results a few cycles delay From e47d7f80b4d734cd4744b5ee9cb944f7a32bd020 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 19 Aug 2024 12:03:35 +0200 Subject: [PATCH 305/374] clocksource/drivers/timer-of: Remove percpu irq related code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 471ef0b5a8aaca4296108e756b970acfc499ede4 upstream. 
GCC's named address space checks errors out with: drivers/clocksource/timer-of.c: In function ‘timer_of_irq_exit’: drivers/clocksource/timer-of.c:29:46: error: passing argument 2 of ‘free_percpu_irq’ from pointer to non-enclosed address space 29 | free_percpu_irq(of_irq->irq, clkevt); | ^~~~~~ In file included from drivers/clocksource/timer-of.c:8: ./include/linux/interrupt.h:201:43: note: expected ‘__seg_gs void *’ but argument is of type ‘struct clock_event_device *’ 201 | extern void free_percpu_irq(unsigned int, void __percpu *); | ^~~~~~~~~~~~~~~ drivers/clocksource/timer-of.c: In function ‘timer_of_irq_init’: drivers/clocksource/timer-of.c:74:51: error: passing argument 4 of ‘request_percpu_irq’ from pointer to non-enclosed address space 74 | np->full_name, clkevt) : | ^~~~~~ ./include/linux/interrupt.h:190:56: note: expected ‘__seg_gs void *’ but argument is of type ‘struct clock_event_device *’ 190 | const char *devname, void __percpu *percpu_dev_id) Sparse warns about: timer-of.c:29:46: warning: incorrect type in argument 2 (different address spaces) timer-of.c:29:46: expected void [noderef] __percpu * timer-of.c:29:46: got struct clock_event_device *clkevt timer-of.c:74:51: warning: incorrect type in argument 4 (different address spaces) timer-of.c:74:51: expected void [noderef] __percpu *percpu_dev_id timer-of.c:74:51: got struct clock_event_device *clkevt It appears the code is incorrect as reported by Uros Bizjak: "The referred code is questionable as it tries to reuse the clkevent pointer once as percpu pointer and once as generic pointer, which should be avoided." This change removes the percpu related code as no drivers is using it. [Daniel: Fixed the description] Fixes: dc11bae785295 ("clocksource/drivers: Add timer-of common init routine") Reported-by: Uros Bizjak Tested-by: Uros Bizjak Link: https://lore.kernel.org/r/20240819100335.2394751-1-daniel.lezcano@linaro.org Signed-off-by: Daniel Lezcano Signed-off-by: Greg Kroah-Hartman --- drivers/clocksource/timer-of.c | 17 ++++------------- drivers/clocksource/timer-of.h | 1 - 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index c3f54d9912be..420202bf76e4 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq) struct clock_event_device *clkevt = &to->clkevt; - if (of_irq->percpu) - free_percpu_irq(of_irq->irq, clkevt); - else - free_irq(of_irq->irq, clkevt); + free_irq(of_irq->irq, clkevt); } /** @@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq) * - Get interrupt number by name * - Get interrupt number by index * - * When the interrupt is per CPU, 'request_percpu_irq()' is called, - * otherwise 'request_irq()' is used. - * * Returns 0 on success, < 0 otherwise */ static __init int timer_of_irq_init(struct device_node *np, @@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np, return -EINVAL; } - ret = of_irq->percpu ? - request_percpu_irq(of_irq->irq, of_irq->handler, - np->full_name, clkevt) : - request_irq(of_irq->irq, of_irq->handler, - of_irq->flags ? of_irq->flags : IRQF_TIMER, - np->full_name, clkevt); + ret = request_irq(of_irq->irq, of_irq->handler, + of_irq->flags ? 
of_irq->flags : IRQF_TIMER, + np->full_name, clkevt); if (ret) { pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np); return ret; diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h index a5478f3e8589..01a2c6b7db06 100644 --- a/drivers/clocksource/timer-of.h +++ b/drivers/clocksource/timer-of.h @@ -11,7 +11,6 @@ struct of_timer_irq { int irq; int index; - int percpu; const char *name; unsigned long flags; irq_handler_t handler; From 13123ef14a94b991d0f129479540f27546cb3329 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Tue, 3 Sep 2024 12:23:12 +0200 Subject: [PATCH 306/374] uprobes: Use kzalloc to allocate xol area commit e240b0fde52f33670d1336697c22d90a4fe33c84 upstream. To prevent unitialized members, use kzalloc to allocate the xol area. Fixes: b059a453b1cf1 ("x86/vdso: Add mremap hook to vm_special_mapping") Signed-off-by: Sven Schnelle Signed-off-by: Peter Zijlstra (Intel) Acked-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240903102313.3402529-1-svens@linux.ibm.com Signed-off-by: Greg Kroah-Hartman --- kernel/events/uprobes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2c83ba776fc7..47cdec3e1df1 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1480,7 +1480,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) uprobe_opcode_t insn = UPROBE_SWBP_INSN; struct xol_area *area; - area = kmalloc(sizeof(*area), GFP_KERNEL); + area = kzalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) goto out; @@ -1490,7 +1490,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) goto free_area; area->xol_mapping.name = "[uprobes]"; - area->xol_mapping.fault = NULL; area->xol_mapping.pages = area->pages; area->pages[0] = alloc_page(GFP_HIGHUSER); if (!area->pages[0]) From b9b6882e243b653d379abbeaa64a500182aba370 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 2 Sep 2024 10:14:24 +0200 Subject: [PATCH 307/374] perf/aux: Fix AUX buffer serialization commit 2ab9d830262c132ab5db2f571003d80850d56b2a upstream. Ole reported that event->mmap_mutex is strictly insufficient to serialize the AUX buffer, add a per RB mutex to fully serialize it. Note that in the lock order comment the perf_event::mmap_mutex order was already wrong, that is, it nesting under mmap_lock is not new with this patch. Fixes: 45bfb2e50471 ("perf: Add AUX area to ring buffer for raw data streams") Reported-by: Ole Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/events/core.c | 18 ++++++++++++------ kernel/events/internal.h | 1 + kernel/events/ring_buffer.c | 2 ++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 7891d5a75526..36191add55c3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1255,8 +1255,9 @@ static void put_ctx(struct perf_event_context *ctx) * perf_event_context::mutex * perf_event::child_mutex; * perf_event_context::lock - * perf_event::mmap_mutex * mmap_lock + * perf_event::mmap_mutex + * perf_buffer::aux_mutex * perf_addr_filters_head::lock * * cpu_hotplug_lock @@ -6383,12 +6384,11 @@ static void perf_mmap_close(struct vm_area_struct *vma) event->pmu->event_unmapped(event, vma->vm_mm); /* - * rb->aux_mmap_count will always drop before rb->mmap_count and - * event->mmap_count, so it is ok to use event->mmap_mutex to - * serialize with perf_mmap here. 
+ * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex + * to avoid complications. */ if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && - atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { + atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) { /* * Stop all AUX events that are writing to this buffer, * so that we can free its AUX pages and corresponding PMU @@ -6405,7 +6405,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) rb_free_aux(rb); WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); - mutex_unlock(&event->mmap_mutex); + mutex_unlock(&rb->aux_mutex); } if (atomic_dec_and_test(&rb->mmap_count)) @@ -6493,6 +6493,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) struct perf_event *event = file->private_data; unsigned long user_locked, user_lock_limit; struct user_struct *user = current_user(); + struct mutex *aux_mutex = NULL; struct perf_buffer *rb = NULL; unsigned long locked, lock_limit; unsigned long vma_size; @@ -6541,6 +6542,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) if (!rb) goto aux_unlock; + aux_mutex = &rb->aux_mutex; + mutex_lock(aux_mutex); + aux_offset = READ_ONCE(rb->user_page->aux_offset); aux_size = READ_ONCE(rb->user_page->aux_size); @@ -6691,6 +6695,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) atomic_dec(&rb->mmap_count); } aux_unlock: + if (aux_mutex) + mutex_unlock(aux_mutex); mutex_unlock(&event->mmap_mutex); /* diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 386d21c7edfa..f376b057320c 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -40,6 +40,7 @@ struct perf_buffer { struct user_struct *mmap_user; /* AUX area */ + struct mutex aux_mutex; long aux_head; unsigned int aux_nest; long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 485cf0a66631..e4f208df3102 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -337,6 +337,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags) */ if (!rb->nr_pages) rb->paused = 1; + + mutex_init(&rb->aux_mutex); } void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags) From b7d4dd381d3e9bee925012a3fa05705c77020795 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Tue, 11 Jun 2024 02:45:14 +0000 Subject: [PATCH 308/374] mm: zswap: rename is_zswap_enabled() to zswap_is_enabled() [ Upstream commit 2b33a97c94bc44468fc1d54b745269c0cf0b7bb2 ] In preparation for introducing a similar function, rename is_zswap_enabled() to use zswap_* prefix like other zswap functions. 
Link: https://lkml.kernel.org/r/20240611024516.1375191-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Barry Song Reviewed-by: Nhat Pham Cc: Chengming Zhou Cc: Chris Li Cc: David Hildenbrand Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Stable-dep-of: e39925734909 ("mm/memcontrol: respect zswap.writeback setting from parent cg too") Signed-off-by: Sasha Levin --- include/linux/zswap.h | 4 ++-- mm/memcontrol.c | 2 +- mm/zswap.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 2a85b941db97..ce5e7bfe8f1e 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -35,7 +35,7 @@ void zswap_swapoff(int type); void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg); void zswap_lruvec_state_init(struct lruvec *lruvec); void zswap_folio_swapin(struct folio *folio); -bool is_zswap_enabled(void); +bool zswap_is_enabled(void); #else struct zswap_lruvec_state {}; @@ -60,7 +60,7 @@ static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {} static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {} static inline void zswap_folio_swapin(struct folio *folio) {} -static inline bool is_zswap_enabled(void) +static inline bool zswap_is_enabled(void) { return false; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 332f190bf3d6..ff1e7d2260ab 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -8444,7 +8444,7 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) { /* if zswap is disabled, do not block pages going to the swapping device */ - return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback); + return !zswap_is_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback); } static u64 zswap_current_read(struct cgroup_subsys_state *css, diff --git a/mm/zswap.c b/mm/zswap.c index a50e2986cd2f..ac65758dd2af 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -131,7 +131,7 @@ static bool zswap_shrinker_enabled = IS_ENABLED( CONFIG_ZSWAP_SHRINKER_DEFAULT_ON); module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644); -bool is_zswap_enabled(void) +bool zswap_is_enabled(void) { return zswap_enabled; } From 43568d917d93ea968ac82426d3748f2e0cacecc2 Mon Sep 17 00:00:00 2001 From: Mike Yuan Date: Fri, 23 Aug 2024 16:27:06 +0000 Subject: [PATCH 309/374] mm/memcontrol: respect zswap.writeback setting from parent cg too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e399257349098bf7c84343f99efb2bc9c22eb9fd ] Currently, the behavior of zswap.writeback wrt. the cgroup hierarchy seems a bit odd. Unlike zswap.max, it doesn't honor the value from parent cgroups. This surfaced when people tried to globally disable zswap writeback, i.e. reserve physical swap space only for hibernation [1] - disabling zswap.writeback only for the root cgroup results in subcgroups with zswap.writeback=1 still performing writeback. The inconsistency became more noticeable after I introduced the MemoryZSwapWriteback= systemd unit setting [2] for controlling the knob. The patch assumed that the kernel would enforce the value of parent cgroups. It could probably be workarounded from systemd's side, by going up the slice unit tree and inheriting the value. Yet I think it's more sensible to make it behave consistently with zswap.max and friends. 
[1] https://wiki.archlinux.org/title/Power_management/Suspend_and_hibernate#Disable_zswap_writeback_to_use_the_swap_space_only_for_hibernation [2] https://github.com/systemd/systemd/pull/31734 Link: https://lkml.kernel.org/r/20240823162506.12117-1-me@yhndnzj.com Fixes: 501a06fe8e4c ("zswap: memcontrol: implement zswap writeback disabling") Signed-off-by: Mike Yuan Reviewed-by: Nhat Pham Acked-by: Yosry Ahmed Cc: Johannes Weiner Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Cc: Signed-off-by: Andrew Morton Signed-off-by: Sasha Levin --- Documentation/admin-guide/cgroup-v2.rst | 7 ++++--- mm/memcontrol.c | 12 +++++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index b69f701b2485..4a7a59bbf76f 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1706,9 +1706,10 @@ PAGE_SIZE multiple when read back. entries fault back in or are written out to disk. memory.zswap.writeback - A read-write single value file. The default value is "1". The - initial value of the root cgroup is 1, and when a new cgroup is - created, it inherits the current value of its parent. + A read-write single value file. The default value is "1". + Note that this setting is hierarchical, i.e. the writeback would be + implicitly disabled for child cgroups if the upper hierarchy + does so. When this is set to 0, all swapping attempts to swapping devices are disabled. This included both zswap writebacks, and swapping due diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ff1e7d2260ab..5c44d3d304da 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5804,8 +5804,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) memcg->zswap_max = PAGE_COUNTER_MAX; - WRITE_ONCE(memcg->zswap_writeback, - !parent || READ_ONCE(parent->zswap_writeback)); + WRITE_ONCE(memcg->zswap_writeback, true); #endif page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); if (parent) { @@ -8444,7 +8443,14 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) { /* if zswap is disabled, do not block pages going to the swapping device */ - return !zswap_is_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback); + if (!zswap_is_enabled()) + return true; + + for (; memcg; memcg = parent_mem_cgroup(memcg)) + if (!READ_ONCE(memcg->zswap_writeback)) + return false; + + return true; } static u64 zswap_current_read(struct cgroup_subsys_state *css, From 4b88865d8bf02ec78ab2ea54b403524d9b239cc6 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 25 Jun 2024 21:42:44 +1000 Subject: [PATCH 310/374] workqueue: wq_watchdog_touch is always called with valid CPU [ Upstream commit 18e24deb1cc92f2068ce7434a94233741fbd7771 ] Warn in the case it is called with cpu == -1. This does not appear to happen anywhere. Signed-off-by: Nicholas Piggin Reviewed-by: Paul E. 
McKenney Signed-off-by: Tejun Heo Stable-dep-of: 98f887f820c9 ("workqueue: Improve scalability of workqueue watchdog touch") Signed-off-by: Sasha Levin --- kernel/workqueue.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c970eec25c5a..f26b0511b023 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7588,6 +7588,8 @@ notrace void wq_watchdog_touch(int cpu) { if (cpu >= 0) per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; + else + WARN_ONCE(1, "%s should be called with valid CPU", __func__); wq_watchdog_touched = jiffies; } From da5f374103a1e0881bbd35847dc57b04ac155eb0 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 25 Jun 2024 21:42:45 +1000 Subject: [PATCH 311/374] workqueue: Improve scalability of workqueue watchdog touch [ Upstream commit 98f887f820c993e05a12e8aa816c80b8661d4c87 ] On a ~2000 CPU powerpc system, hard lockups have been observed in the workqueue code when stop_machine runs (in this case due to CPU hotplug). This is due to lots of CPUs spinning in multi_cpu_stop, calling touch_nmi_watchdog() which ends up calling wq_watchdog_touch(). wq_watchdog_touch() writes to the global variable wq_watchdog_touched, and that can find itself in the same cacheline as other important workqueue data, which slows down operations to the point of lockups. In the case of the following abridged trace, worker_pool_idr was in the hot line, causing the lockups to always appear at idr_find. watchdog: CPU 1125 self-detected hard LOCKUP @ idr_find Call Trace: get_work_pool __queue_work call_timer_fn run_timer_softirq __do_softirq do_softirq_own_stack irq_exit timer_interrupt decrementer_common_virt * interrupt: 900 (timer) at multi_cpu_stop multi_cpu_stop cpu_stopper_thread smpboot_thread_fn kthread Fix this by having wq_watchdog_touch() only write to the line if the last time a touch was recorded exceeds 1/4 of the watchdog threshold. Reported-by: Srikar Dronamraju Signed-off-by: Nicholas Piggin Reviewed-by: Paul E. McKenney Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin --- kernel/workqueue.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f26b0511b023..ffbf99fb53bf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7586,12 +7586,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) notrace void wq_watchdog_touch(int cpu) { + unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; + unsigned long touch_ts = READ_ONCE(wq_watchdog_touched); + unsigned long now = jiffies; + if (cpu >= 0) - per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; + per_cpu(wq_watchdog_touched_cpu, cpu) = now; else WARN_ONCE(1, "%s should be called with valid CPU", __func__); - wq_watchdog_touched = jiffies; + /* Don't unnecessarily store to global cacheline */ + if (time_after(now, touch_ts + thresh / 4)) + WRITE_ONCE(wq_watchdog_touched, jiffies); } static void wq_watchdog_set_thresh(unsigned long thresh) From 968d52c4dde2c15d63400078b94bfa170fbb15f7 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 7 Jun 2024 16:55:35 +0200 Subject: [PATCH 312/374] path: add cleanup helper [ Upstream commit ff2c570ef7eaa9ded58e7a02dd7a68874a897508 ] Add a simple cleanup helper so we can cleanup struct path easily. No need for any extra machinery. Avoid DEFINE_FREE() as it causes a local copy of struct path to be used. Just rely on path_put() directly called from a cleanup helper. 
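A hypothetical caller, shown only to illustrate the intended pattern (the lookup and identifier names are illustrative, not taken from the patch): initialize the variable to the empty path and let the cleanup run on every return:

	struct path path __free(path_put) = {};
	int ret;

	ret = user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;	/* path_put() on the still-empty path is harmless */

	/* ... use path.mnt / path.dentry ... */
	return 0;		/* path_put() runs automatically here as well */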
Link: https://lore.kernel.org/r/20240607-vfs-listmount-reverse-v1-2-7877a2bfa5e5@kernel.org Reviewed-by: Josef Bacik Signed-off-by: Christian Brauner Stable-dep-of: dd7cb142f467 ("fs: relax permissions for listmount()") Signed-off-by: Sasha Levin --- include/linux/path.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/path.h b/include/linux/path.h index 475225a03d0d..ca073e70decd 100644 --- a/include/linux/path.h +++ b/include/linux/path.h @@ -24,4 +24,13 @@ static inline void path_put_init(struct path *path) *path = (struct path) { }; } +/* + * Cleanup macro for use with __free(path_put). Avoids dereference and + * copying @path unlike DEFINE_FREE(). path_put() will handle the empty + * path correctly just ensure @path is initialized: + * + * struct path path __free(path_put) = {}; + */ +#define __free_path_put path_put + #endif /* _LINUX_PATH_H */ From e895aef0485f13685d0274b903b389de87615b6c Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 7 Jun 2024 16:55:36 +0200 Subject: [PATCH 313/374] fs: simplify error handling [ Upstream commit 17e70161281bb66316e94e63a15d1a8498bf6f01 ] Rely on cleanup helper and simplify error handling Link: https://lore.kernel.org/r/20240607-vfs-listmount-reverse-v1-3-7877a2bfa5e5@kernel.org Reviewed-by: Josef Bacik Signed-off-by: Christian Brauner Stable-dep-of: dd7cb142f467 ("fs: relax permissions for listmount()") Signed-off-by: Sasha Levin --- fs/namespace.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/fs/namespace.c b/fs/namespace.c index 1390e9e521d6..ef7b202f8e85 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5051,7 +5051,7 @@ static struct mount *listmnt_next(struct mount *curr) static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids) { - struct path root; + struct path root __free(path_put) = {}; struct mnt_namespace *ns = current->nsproxy->mnt_ns; struct path orig; struct mount *r, *first; @@ -5064,10 +5064,8 @@ static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, orig = root; } else { orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); - if (!orig.mnt) { - ret = -ENOENT; - goto err; - } + if (!orig.mnt) + return -ENOENT; orig.dentry = orig.mnt->mnt_root; } @@ -5076,14 +5074,12 @@ static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, * mounts to show users. */ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) && - !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) { - ret = -EPERM; - goto err; - } + !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) + return -EPERM; ret = security_sb_statfs(orig.dentry); if (ret) - goto err; + return ret; if (!last_mnt_id) first = node_to_mount(rb_first(&ns->mounts)); @@ -5100,8 +5096,6 @@ static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, nr_mnt_ids--; ret++; } -err: - path_put(&root); return ret; } From 9be835e9fd63b17d202bfd49c2912faf7785fb0a Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 24 Jun 2024 11:49:44 -0400 Subject: [PATCH 314/374] fs: relax permissions for listmount() [ Upstream commit dd7cb142f467c4660698bcaa4a48c688b443ab81 ] It is sufficient to have capabilities in the owning user namespace of the mount namespace to list all mounts regardless of whether they are reachable or not. 
Link: https://lore.kernel.org/r/8adc0d3f4f7495faacc6a7c63095961f7f1637c7.1719243756.git.josef@toxicpanda.com Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/namespace.c b/fs/namespace.c index ef7b202f8e85..e1ced589d835 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5074,7 +5074,7 @@ static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids, * mounts to show users. */ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) && - !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) + !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; ret = security_sb_statfs(orig.dentry); From 560eef75500d9089c71134187c5fe177e2235e00 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 29 May 2024 14:34:31 +0100 Subject: [PATCH 315/374] ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add() [ Upstream commit fadf231f0a06a6748a7fc4a2c29ac9ef7bca6bfd ] Rafael observed [1] that returning 0 from processor_add() will result in acpi_default_enumeration() being called which will attempt to create a platform device, but that makes little sense when the processor is known to be not available. So just return the error code from acpi_processor_get_info() instead. Link: https://lore.kernel.org/all/CAJZ5v0iKU8ra9jR+EmgxbuNm=Uwx2m1-8vn_RAZ+aCiUVLe3Pw@mail.gmail.com/ [1] Suggested-by: Rafael J. Wysocki Acked-by: Rafael J. Wysocki Reviewed-by: Gavin Shan Signed-off-by: Jonathan Cameron Link: https://lore.kernel.org/r/20240529133446.28446-5-Jonathan.Cameron@huawei.com Signed-off-by: Catalin Marinas Stable-dep-of: 47ec9b417ed9 ("ACPI: processor: Fix memory leaks in error paths of processor_add()") Signed-off-by: Sasha Levin --- drivers/acpi/acpi_processor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 7a0dd35d62c9..c052cdeca9fd 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -400,7 +400,7 @@ static int acpi_processor_add(struct acpi_device *device, result = acpi_processor_get_info(device); if (result) /* Processor is not physically present or unavailable */ - return 0; + return result; BUG_ON(pr->id >= nr_cpu_ids); From eea54a453c1d82cab8e691e46bca90966352f956 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 29 May 2024 14:34:32 +0100 Subject: [PATCH 316/374] ACPI: processor: Fix memory leaks in error paths of processor_add() [ Upstream commit 47ec9b417ed9b6b8ec2a941cd84d9de62adc358a ] If acpi_processor_get_info() returned an error, pr and the associated pr->throttling.shared_cpu_map were leaked. The unwind code was in the wrong order wrt to setup, relying on some unwind actions having no affect (clearing variables that were never set etc). That makes it harder to reason about so reorder and add appropriate labels to only undo what was actually set up in the first place. Acked-by: Rafael J. 
Wysocki Reviewed-by: Gavin Shan Signed-off-by: Jonathan Cameron Link: https://lore.kernel.org/r/20240529133446.28446-6-Jonathan.Cameron@huawei.com Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin --- drivers/acpi/acpi_processor.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index c052cdeca9fd..5f5a01ccfc3a 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -400,7 +400,7 @@ static int acpi_processor_add(struct acpi_device *device, result = acpi_processor_get_info(device); if (result) /* Processor is not physically present or unavailable */ - return result; + goto err_clear_driver_data; BUG_ON(pr->id >= nr_cpu_ids); @@ -415,7 +415,7 @@ static int acpi_processor_add(struct acpi_device *device, "BIOS reported wrong ACPI id %d for the processor\n", pr->id); /* Give up, but do not abort the namespace scan. */ - goto err; + goto err_clear_driver_data; } /* * processor_device_array is not cleared on errors to allow buggy BIOS @@ -427,12 +427,12 @@ static int acpi_processor_add(struct acpi_device *device, dev = get_cpu_device(pr->id); if (!dev) { result = -ENODEV; - goto err; + goto err_clear_per_cpu; } result = acpi_bind_one(dev, device); if (result) - goto err; + goto err_clear_per_cpu; pr->dev = dev; @@ -443,10 +443,11 @@ static int acpi_processor_add(struct acpi_device *device, dev_err(dev, "Processor driver could not be attached\n"); acpi_unbind_one(dev); - err: - free_cpumask_var(pr->throttling.shared_cpu_map); - device->driver_data = NULL; + err_clear_per_cpu: per_cpu(processors, pr->id) = NULL; + err_clear_driver_data: + device->driver_data = NULL; + free_cpumask_var(pr->throttling.shared_cpu_map); err_free_pr: kfree(pr); return result; From 7c7d598974b5993b2c47a7f43aec8db41a9d0095 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 29 May 2024 14:34:38 +0100 Subject: [PATCH 317/374] arm64: acpi: Move get_cpu_for_acpi_id() to a header [ Upstream commit 8d34b6f17b9ac93faa2791eb037dcb08bdf755de ] ACPI identifies CPUs by UID. get_cpu_for_acpi_id() maps the ACPI UID to the Linux CPU number. The helper to retrieve this mapping is only available in arm64's NUMA code. Move it to live next to get_acpi_id_for_cpu(). 
Signed-off-by: James Morse Reviewed-by: Jonathan Cameron Reviewed-by: Gavin Shan Tested-by: Miguel Luis Tested-by: Vishnu Pajjuri Tested-by: Jianyong Wu Signed-off-by: Russell King (Oracle) Acked-by: Hanjun Guo Signed-off-by: Jonathan Cameron Reviewed-by: Lorenzo Pieralisi Link: https://lore.kernel.org/r/20240529133446.28446-12-Jonathan.Cameron@huawei.com Signed-off-by: Catalin Marinas Stable-dep-of: 2488444274c7 ("arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry") Signed-off-by: Sasha Levin --- arch/arm64/include/asm/acpi.h | 11 +++++++++++ arch/arm64/kernel/acpi_numa.c | 11 ----------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 6792a1f83f2a..bc9a6656fc0c 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -119,6 +119,17 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu) return acpi_cpu_get_madt_gicc(cpu)->uid; } +static inline int get_cpu_for_acpi_id(u32 uid) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + if (uid == get_acpi_id_for_cpu(cpu)) + return cpu; + + return -EINVAL; +} + static inline void arch_fix_phys_package_id(int num, u32 slot) { } void __init acpi_init_cpus(void); int apei_claim_sea(struct pt_regs *regs); diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index ccbff21ce1fa..2465f291c7e1 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu) return acpi_early_node_map[cpu]; } -static inline int get_cpu_for_acpi_id(u32 uid) -{ - int cpu; - - for (cpu = 0; cpu < nr_cpu_ids; cpu++) - if (uid == get_acpi_id_for_cpu(cpu)) - return cpu; - - return -EINVAL; -} - static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header, const unsigned long end) { From bc7fbb37e3d2df59336eadbd6a56be632e3c7df7 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 29 May 2024 14:34:39 +0100 Subject: [PATCH 318/374] arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry [ Upstream commit 2488444274c70038eb6b686cba5f1ce48ebb9cdd ] In a review discussion of the changes to support vCPU hotplug where a check was added on the GICC being enabled if was online, it was noted that there is need to map back to the cpu and use that to index into a cpumask. As such, a valid ID is needed. If an MPIDR check fails in acpi_map_gic_cpu_interface() it is possible for the entry in cpu_madt_gicc[cpu] == NULL. This function would then cause a NULL pointer dereference. Whilst a path to trigger this has not been established, harden this caller against the possibility. 
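For reference, this is how the helper reads once the hardening lands on top of the previous patch (a sketch combining the two hunks visible in this series, shown for illustration only):

	static inline int get_cpu_for_acpi_id(u32 uid)
	{
		int cpu;

		/* Skip CPUs without a MADT GICC entry so that
		 * get_acpi_id_for_cpu(), which dereferences
		 * acpi_cpu_get_madt_gicc(cpu)->uid, is never called on a
		 * NULL entry. */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
			if (acpi_cpu_get_madt_gicc(cpu) &&
			    uid == get_acpi_id_for_cpu(cpu))
				return cpu;

		return -EINVAL;
	}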
Reviewed-by: Gavin Shan Signed-off-by: Jonathan Cameron Link: https://lore.kernel.org/r/20240529133446.28446-13-Jonathan.Cameron@huawei.com Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin --- arch/arm64/include/asm/acpi.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index bc9a6656fc0c..a407f9cd549e 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -124,7 +124,8 @@ static inline int get_cpu_for_acpi_id(u32 uid) int cpu; for (cpu = 0; cpu < nr_cpu_ids; cpu++) - if (uid == get_acpi_id_for_cpu(cpu)) + if (acpi_cpu_get_madt_gicc(cpu) && + uid == get_acpi_id_for_cpu(cpu)) return cpu; return -EINVAL; From 018028d42bbc83297a8fd6a5d2db5878e42ef23a Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 11 Jan 2023 20:25:48 +0100 Subject: [PATCH 319/374] can: mcp251xfd: mcp251xfd_handle_rxif_ring_uinc(): factor out in separate function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d49184b7b585f9da7ee546b744525f62117019f6 ] This is a preparation patch. Sending the UINC messages followed by incrementing the tail pointer will be called in more than one place in upcoming patches, so factor this out into a separate function. Also make mcp251xfd_handle_rxif_ring_uinc() safe to be called with a "len" of 0. Tested-by: Stefan Althöfer Tested-by: Thomas Kopp Signed-off-by: Marc Kleine-Budde Stable-dep-of: 85505e585637 ("can: mcp251xfd: rx: prepare to workaround broken RX FIFO head index erratum") Signed-off-by: Sasha Levin --- drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 48 +++++++++++++------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index ced8d9c81f8c..5e2f39de88f3 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -197,6 +197,37 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, return err; } +static int +mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring, + u8 len) +{ + int offset; + int err; + + if (!len) + return 0; + + /* Increment the RX FIFO tail pointer 'len' times in a + * single SPI message. + * + * Note: + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. + */ + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); + if (err) + return err; + + ring->tail += len; + + return 0; +} + static int mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring) @@ -210,8 +241,6 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, return err; while ((len = mcp251xfd_get_rx_linear_len(ring))) { - int offset; - rx_tail = mcp251xfd_get_rx_tail(ring); err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, @@ -227,22 +256,9 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, return err; } - /* Increment the RX FIFO tail pointer 'len' times in a - * single SPI message. - * - * Note: - * Calculate offset, so that the SPI transfer ends on - * the last message of the uinc_xfer array, which has - * "cs_change == 0", to properly deactivate the chip - * select. 
- */ - offset = ARRAY_SIZE(ring->uinc_xfer) - len; - err = spi_sync_transfer(priv->spi, - ring->uinc_xfer + offset, len); + err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, len); if (err) return err; - - ring->tail += len; } return 0; From 185c150a826e5461e30405cb21dc0de241c1469a Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 11 Jan 2023 21:07:03 +0100 Subject: [PATCH 320/374] can: mcp251xfd: rx: prepare to workaround broken RX FIFO head index erratum MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 85505e585637a737e4713c1386c30e37c325b82e ] This is a preparatory patch to work around erratum DS80000789E 6 of the mcp2518fd, the other variants of the chip family (mcp2517fd and mcp251863) are probably also affected. When handling the RX interrupt, the driver iterates over all pending FIFOs (which are implemented as ring buffers in hardware) and reads the FIFO header index from the RX FIFO STA register of the chip. In the bad case, the driver reads a too large head index. In the original code, the driver always trusted the read value, which caused old CAN frames that were already processed, or new, incompletely written CAN frames to be (re-)processed. Instead of reading and trusting the head index, read the head index and calculate the number of CAN frames that were supposedly received - replace mcp251xfd_rx_ring_update() with mcp251xfd_get_rx_len(). The mcp251xfd_handle_rxif_ring() function reads the received CAN frames from the chip, iterates over them and pushes them into the network stack. Prepare that the iteration can be stopped if an old CAN frame is detected. The actual code to detect old or incomplete frames and abort will be added in the next patch. Link: https://lore.kernel.org/all/BL3PR11MB64844C1C95CA3BDADAE4D8CCFBC99@BL3PR11MB6484.namprd11.prod.outlook.com Reported-by: Stefan Althöfer Closes: https://lore.kernel.org/all/FR0P281MB1966273C216630B120ABB6E197E89@FR0P281MB1966.DEUP281.PROD.OUTLOOK.COM Tested-by: Stefan Althöfer Tested-by: Thomas Kopp Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- .../net/can/spi/mcp251xfd/mcp251xfd-ring.c | 2 + drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 89 +++++++++++-------- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 12 +-- 3 files changed, 56 insertions(+), 47 deletions(-) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index 3a941a71c78f..5f92aed62ff9 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -523,6 +523,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) } rx_ring->obj_num = rx_obj_num; + rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) - + ilog2(rx_obj_num); rx_ring->obj_size = rx_obj_size; priv->rx[i] = rx_ring; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index 5e2f39de88f3..5d0fb1c454cd 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -16,23 +16,14 @@ #include "mcp251xfd.h" -static inline int -mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring, - u8 *rx_head, bool *fifo_empty) +static 
inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta) { - u32 fifo_sta; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), - &fifo_sta); - if (err) - return err; - - *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); - *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} - return 0; +static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; } static inline int @@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv, } static int -mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, - struct mcp251xfd_rx_ring *ring) +mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + u8 *len_p) { - u32 new_head; - u8 chip_rx_head; - bool fifo_empty; + const u8 shift = ring->obj_num_shift_to_u8; + u8 chip_head, tail, len; + u32 fifo_sta; int err; - err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head, - &fifo_empty); - if (err || fifo_empty) + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), + &fifo_sta); + if (err) + return err; + + if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) { + *len_p = 0; + return 0; + } + + if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) { + *len_p = ring->obj_num; + return 0; + } + + chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_rx_tail(priv, ring); + if (err) return err; + tail = mcp251xfd_get_rx_tail(ring); - /* chip_rx_head, is the next RX-Object filled by the HW. - * The new RX head must be >= the old head. + /* First shift to full u8. The subtraction works on signed + * values, that keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is an u8. */ - new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; - if (new_head <= ring->head) - new_head += ring->obj_num; + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len)); - ring->head = new_head; + len = (chip_head << shift) - (tail << shift); + *len_p = len >> shift; - return mcp251xfd_check_rx_tail(priv, ring); + return 0; } static void @@ -208,6 +219,8 @@ mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv, if (!len) return 0; + ring->head += len; + /* Increment the RX FIFO tail pointer 'len' times in a * single SPI message. 
* @@ -233,22 +246,22 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring) { struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; - u8 rx_tail, len; + u8 rx_tail, len, l; int err, i; - err = mcp251xfd_rx_ring_update(priv, ring); + err = mcp251xfd_get_rx_len(priv, ring, &len); if (err) return err; - while ((len = mcp251xfd_get_rx_linear_len(ring))) { + while ((l = mcp251xfd_get_rx_linear_len(ring, len))) { rx_tail = mcp251xfd_get_rx_tail(ring); err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, - rx_tail, len); + rx_tail, l); if (err) return err; - for (i = 0; i < len; i++) { + for (i = 0; i < l; i++) { err = mcp251xfd_handle_rxif_one(priv, ring, (void *)hw_rx_obj + i * ring->obj_size); @@ -256,9 +269,11 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, return err; } - err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, len); + err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l); if (err) return err; + + len -= l; } return 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 4628bf847bc9..2e5cee6ad0c4 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -558,6 +558,7 @@ struct mcp251xfd_rx_ring { u8 nr; u8 fifo_nr; u8 obj_num; + u8 obj_num_shift_to_u8; u8 obj_size; union mcp251xfd_write_reg_buf irq_enable_buf; @@ -907,18 +908,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring) return ring->tail & (ring->obj_num - 1); } -static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring) -{ - return ring->head - ring->tail; -} - static inline u8 -mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring) +mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len) { - u8 len; - - len = mcp251xfd_get_rx_len(ring); - return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring)); } From 91c389c7aa9325466eb17f69cbfa1c4e6846adaf Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 11 Jan 2023 11:48:16 +0100 Subject: [PATCH 321/374] can: mcp251xfd: clarify the meaning of timestamp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e793c724b48ca8cae9693bc3be528e85284c126a ] The mcp251xfd chip is configured to provide a timestamp with each received and transmitted CAN frame. The timestamp is derived from the internal free-running timer, which can also be read from the TBC register via SPI. The timer is 32 bits wide and is clocked by the external oscillator (typically 20 or 40 MHz). To avoid confusion, we call this timestamp "timestamp_raw" or "ts_raw" for short. Using the timecounter framework, the "ts_raw" is converted to 64 bit nanoseconds since the epoch. This is what we call "timestamp". This is a preparation for the next patches which use the "timestamp" to work around a bug where so far only the "ts_raw" is used. 
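To illustrate why the raw value needs this treatment (figures taken from the follow-up erratum patch in this series; sketch only, not part of this change): with a 40 MHz oscillator the 32 bit free-running timer wraps after

	2^32 cycles / 40,000,000 Hz ~= 107 seconds,

so two "ts_raw" values cannot be ordered reliably across a wrap. The timecounter framework extends them to 64 bit nanoseconds, which remain monotonic for the lifetime of the interface:

	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	u64 ns;

	/* ts_raw: 32 bit value read from the TBC register or from a received
	 * RX object.  ns: 64 bit nanoseconds, safe to compare across wraps. */
	ns = timecounter_cyc2time(&priv->tc, ts_raw);
	hwtstamps->hwtstamp = ns_to_ktime(ns);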
Tested-by: Stefan Althöfer Tested-by: Thomas Kopp Signed-off-by: Marc Kleine-Budde Stable-dep-of: 24436be590c6 ("can: mcp251xfd: rx: add workaround for erratum DS80000789E 6 of mcp2518fd") Signed-off-by: Sasha Levin --- .../net/can/spi/mcp251xfd/mcp251xfd-core.c | 28 +++++++++---------- drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 2 +- drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 2 +- .../can/spi/mcp251xfd/mcp251xfd-timestamp.c | 22 ++++----------- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 27 ++++++++++++++---- 5 files changed, 43 insertions(+), 38 deletions(-) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index bf1589aef1fc..f1e6007b74ce 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -867,18 +867,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev, static struct sk_buff * mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, - struct can_frame **cf, u32 *timestamp) + struct can_frame **cf, u32 *ts_raw) { struct sk_buff *skb; int err; - err = mcp251xfd_get_timestamp(priv, timestamp); + err = mcp251xfd_get_timestamp_raw(priv, ts_raw); if (err) return NULL; skb = alloc_can_err_skb(priv->ndev, cf); if (skb) - mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); + mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw); return skb; } @@ -889,7 +889,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) struct mcp251xfd_rx_ring *ring; struct sk_buff *skb; struct can_frame *cf; - u32 timestamp, rxovif; + u32 ts_raw, rxovif; int err, i; stats->rx_over_errors++; @@ -924,14 +924,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) return err; } - skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp); + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); if (!skb) return 0; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; @@ -948,12 +948,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; - u32 bdiag1, timestamp; + u32 bdiag1, ts_raw; struct sk_buff *skb; struct can_frame *cf = NULL; int err; - err = mcp251xfd_get_timestamp(priv, ×tamp); + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); if (err) return err; @@ -1035,8 +1035,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) if (!cf) return 0; - mcp251xfd_skb_set_timestamp(priv, skb, timestamp); - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; @@ -1049,7 +1049,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) struct sk_buff *skb; struct can_frame *cf = NULL; enum can_state new_state, rx_state, tx_state; - u32 trec, timestamp; + u32 trec, ts_raw; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); @@ -1079,7 +1079,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) /* The 
skb allocation might fail, but can_change_state() * handles cf == NULL. */ - skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp); + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); can_change_state(priv->ndev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { @@ -1110,7 +1110,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) cf->data[7] = bec.rxerr; } - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index 5d0fb1c454cd..a79e6c661ecc 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -160,7 +160,7 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) memcpy(cfd->data, hw_rx_obj->data, cfd->len); - mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); + mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_rx_obj->ts); } static int diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index 5b0c7890d4b4..3886476a8f8e 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -97,7 +97,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, tef_tail = mcp251xfd_get_tef_tail(priv); skb = priv->can.echo_skb[tef_tail]; if (skb) - mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); + mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts); stats->tx_bytes += can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, tef_tail, hw_tef_obj->ts, diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c index 712e09186987..1db99aabe85c 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2021 Pengutronix, +// Copyright (c) 2021, 2023 Pengutronix, // Marc Kleine-Budde // @@ -11,20 +11,20 @@ #include "mcp251xfd.h" -static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) +static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc) { const struct mcp251xfd_priv *priv; - u32 timestamp = 0; + u32 ts_raw = 0; int err; priv = container_of(cc, struct mcp251xfd_priv, cc); - err = mcp251xfd_get_timestamp(priv, ×tamp); + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); if (err) netdev_err(priv->ndev, "Error %d while reading timestamp. 
HW timestamps may be inaccurate.", err); - return timestamp; + return ts_raw; } static void mcp251xfd_timestamp_work(struct work_struct *work) @@ -39,21 +39,11 @@ static void mcp251xfd_timestamp_work(struct work_struct *work) MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } -void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, - struct sk_buff *skb, u32 timestamp) -{ - struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); - u64 ns; - - ns = timecounter_cyc2time(&priv->tc, timestamp); - hwtstamps->hwtstamp = ns_to_ktime(ns); -} - void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) { struct cyclecounter *cc = &priv->cc; - cc->read = mcp251xfd_timestamp_read; + cc->read = mcp251xfd_timestamp_raw_read; cc->mask = CYCLECOUNTER_MASK(32); cc->shift = 1; cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 2e5cee6ad0c4..ae35845d4ce1 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -2,7 +2,7 @@ * * mcp251xfd - Microchip MCP251xFD Family CAN controller driver * - * Copyright (c) 2019, 2020, 2021 Pengutronix, + * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, * Marc Kleine-Budde * Copyright (c) 2019 Martin Sperl */ @@ -812,10 +812,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv, return data; } -static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, - u32 *timestamp) +static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv, + u32 *ts_raw) { - return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); + return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw); +} + +static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static inline +void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 ts_raw) +{ + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, ts_raw); + mcp251xfd_skb_set_timestamp(skb, ns); } static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) @@ -936,8 +953,6 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); -void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, - struct sk_buff *skb, u32 timestamp); void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); From d33a5bff705928a4f9b2477bada6b807551949e0 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 11 Jan 2023 11:53:50 +0100 Subject: [PATCH 322/374] can: mcp251xfd: rx: add workaround for erratum DS80000789E 6 of mcp2518fd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 24436be590c6fbb05f6161b0dfba7d9da60214aa ] This patch tries to works around erratum DS80000789E 6 of the mcp2518fd, the other variants of the chip family (mcp2517fd and mcp251863) are probably also affected. In the bad case, the driver reads a too large head index. In the original code, the driver always trusted the read value, which caused old, already processed CAN frames or new, incompletely written CAN frames to be (re-)processed. 
To work around this issue, keep a per FIFO timestamp [1] of the last valid received CAN frame and compare against the timestamp of every received CAN frame. If an old CAN frame is detected, abort the iteration and mark the number of valid CAN frames as processed in the chip by incrementing the FIFO's tail index. Further tests showed that this workaround can recognize old CAN frames, but a small time window remains in which partially written CAN frames [2] are not recognized but then processed. These CAN frames have the correct data and time stamps, but the DLC has not yet been updated. [1] As the raw timestamp overflows every 107 seconds (at the usual clock rate of 40 MHz) convert it to nanoseconds with the timecounter framework and use this to detect stale CAN frames. Link: https://lore.kernel.org/all/BL3PR11MB64844C1C95CA3BDADAE4D8CCFBC99@BL3PR11MB6484.namprd11.prod.outlook.com [2] Reported-by: Stefan Althöfer Closes: https://lore.kernel.org/all/FR0P281MB1966273C216630B120ABB6E197E89@FR0P281MB1966.DEUP281.PROD.OUTLOOK.COM Tested-by: Stefan Althöfer Tested-by: Thomas Kopp Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- .../net/can/spi/mcp251xfd/mcp251xfd-ring.c | 1 + drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 32 +++++++++++++++++-- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 3 ++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index 5f92aed62ff9..f72582d4d3e8 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) int i, j; mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + rx_ring->last_valid = timecounter_read(&priv->tc); rx_ring->head = 0; rx_ring->tail = 0; rx_ring->base = *base; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index a79e6c661ecc..fe897f3e4c12 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -159,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) memcpy(cfd->data, hw_rx_obj->data, cfd->len); - - mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_rx_obj->ts); } static int @@ -171,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, struct net_device_stats *stats = &priv->ndev->stats; struct sk_buff *skb; struct canfd_frame *cfd; + u64 timestamp; int err; + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the RX FIFO head index + * might be corrupted and we might process past the RX FIFO's + * head into old CAN frames. + * + * Compare the timestamp of currently processed CAN frame with + * last valid frame received. Abort with -EBADMSG if an old + * CAN frame is detected. 
+ */ + timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts); + if (timestamp <= ring->last_valid) { + stats->rx_fifo_errors++; + + return -EBADMSG; + } + ring->last_valid = timestamp; + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) skb = alloc_canfd_skb(priv->ndev, &cfd); else @@ -183,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, return 0; } + mcp251xfd_skb_set_timestamp(skb, timestamp); mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts); if (err) @@ -265,7 +282,16 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, err = mcp251xfd_handle_rxif_one(priv, ring, (void *)hw_rx_obj + i * ring->obj_size); - if (err) + + /* -EBADMSG means we're affected by mcp2518fd + * erratum DS80000789E 6., i.e. the timestamp + * in the RX object is older that the last + * valid received CAN frame. Don't process any + * further and mark processed frames as good. + */ + if (err == -EBADMSG) + return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i); + else if (err) return err; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index ae35845d4ce1..991662fbba42 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -554,6 +554,9 @@ struct mcp251xfd_rx_ring { unsigned int head; unsigned int tail; + /* timestamp of the last valid received CAN frame */ + u64 last_valid; + u16 base; u8 nr; u8 fifo_nr; From 4ba7c00d39f6c34439b5e20e6cfa5696411fd714 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Fri, 2 Feb 2024 14:00:27 -0500 Subject: [PATCH 323/374] drm/amd: Add gfx12 swizzle mode defs [ Upstream commit 7ceb94e87bffff7c12b61eb29749e1d8ac976896 ] Add GFX12 swizzle mode definitions for use with DCN401 Signed-off-by: Aurabindo Pillai Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher Stable-dep-of: 8dd1426e2c80 ("drm/amdgpu: handle gfx12 in amdgpu_display_verify_sizes") Signed-off-by: Sasha Levin --- include/uapi/drm/drm_fourcc.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 84d502e42961..4168445fbb8b 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1476,6 +1476,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_TILE_VER_GFX10 2 #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 #define AMD_FMT_MOD_TILE_VER_GFX11 4 +#define AMD_FMT_MOD_TILE_VER_GFX12 5 /* * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical @@ -1486,6 +1487,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) /* * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has * GFX9 as canonical version. + * + * 64K_D_2D on GFX12 is identical to 64K_D on GFX11. 
*/ #define AMD_FMT_MOD_TILE_GFX9_64K_D 10 #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 @@ -1493,6 +1496,19 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 +/* Gfx12 swizzle modes: + * 0 - LINEAR + * 1 - 256B_2D - 2D block dimensions + * 2 - 4KB_2D + * 3 - 64KB_2D + * 4 - 256KB_2D + * 5 - 4KB_3D - 3D block dimensions + * 6 - 64KB_3D + * 7 - 256KB_3D + */ +#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3 +#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4 + #define AMD_FMT_MOD_DCC_BLOCK_64B 0 #define AMD_FMT_MOD_DCC_BLOCK_128B 1 #define AMD_FMT_MOD_DCC_BLOCK_256B 2 From 830bdd8a183d689f78f377f715ce502b4d60c773 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Sat, 1 Jun 2024 19:53:01 -0400 Subject: [PATCH 324/374] drm/amdgpu: handle gfx12 in amdgpu_display_verify_sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 8dd1426e2c80e32ac1995007330c8f95ffa28ebb ] It verified GFX9-11 swizzle modes on GFX12, which has undefined behavior. Signed-off-by: Marek Olšák Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 27 ++++++++++++++++++++- include/uapi/drm/drm_fourcc.h | 2 ++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 4fcc227db00b..30755ce4002d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1041,6 +1041,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) block_width = 256 / format_info->cpp[i]; block_height = 1; block_size_log2 = 8; + } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) { + int swizzle = AMD_FMT_MOD_GET(TILE, modifier); + + switch (swizzle) { + case AMD_FMT_MOD_TILE_GFX12_256B_2D: + block_size_log2 = 8; + break; + case AMD_FMT_MOD_TILE_GFX12_4K_2D: + block_size_log2 = 12; + break; + case AMD_FMT_MOD_TILE_GFX12_64K_2D: + block_size_log2 = 16; + break; + case AMD_FMT_MOD_TILE_GFX12_256K_2D: + block_size_log2 = 18; + break; + default: + drm_dbg_kms(rfb->base.dev, + "Gfx12 swizzle mode with unknown block size: %d\n", swizzle); + return -EINVAL; + } + + get_block_dimensions(block_size_log2, format_info->cpp[i], + &block_width, &block_height); } else { int swizzle = AMD_FMT_MOD_GET(TILE, modifier); @@ -1076,7 +1100,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) return ret; } - if (AMD_FMT_MOD_GET(DCC, modifier)) { + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 && + AMD_FMT_MOD_GET(DCC, modifier)) { if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) { block_size_log2 = get_dcc_block_size(modifier, false, false); get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 4168445fbb8b..2d84a8052b15 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1506,6 +1506,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * 6 - 64KB_3D * 7 - 256KB_3D */ +#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1 +#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2 #define AMD_FMT_MOD_TILE_GFX12_64K_2D 3 #define AMD_FMT_MOD_TILE_GFX12_256K_2D 4 From 79f80316e6a966bc105c186ccb53e2411e097482 Mon Sep 17 00:00:00 2001 From: Igor Pylypiv Date: Tue, 2 Jul 2024 02:47:32 +0000 Subject: [PATCH 325/374] ata: 
libata-scsi: Remove redundant sense_buffer memsets [ Upstream commit 3f6d903b54a137e9e438d9c3b774b5d0432917bc ] SCSI layer clears sense_buffer in scsi_queue_rq() so there is no need for libata to clear it again. Reviewed-by: Hannes Reinecke Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Signed-off-by: Igor Pylypiv Link: https://lore.kernel.org/r/20240702024735.1152293-5-ipylypiv@google.com Signed-off-by: Niklas Cassel Stable-dep-of: 816be86c7993 ("ata: libata-scsi: Check ATA_QCFLAG_RTF_FILLED before using result_tf") Signed-off-by: Sasha Levin --- drivers/ata/libata-scsi.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 4e0847601103..4deee71006ef 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -926,11 +926,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) { struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; - unsigned char *sb = cmd->sense_buffer; u8 sense_key, asc, ascq; - memset(sb, 0, SCSI_SENSE_BUFFERSIZE); - /* * Use ata_to_sense_error() to map status register bits * onto sense key, asc & ascq. @@ -976,8 +973,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) u64 block; u8 sense_key, asc, ascq; - memset(sb, 0, SCSI_SENSE_BUFFERSIZE); - if (ata_dev_disabled(dev)) { /* Device disabled after error recovery */ /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */ From f9d0026c628ce7f20e2e43ba4d5a56612fed2880 Mon Sep 17 00:00:00 2001 From: Igor Pylypiv Date: Tue, 2 Jul 2024 02:47:35 +0000 Subject: [PATCH 326/374] ata: libata-scsi: Check ATA_QCFLAG_RTF_FILLED before using result_tf [ Upstream commit 816be86c7993d3c5832c3017c0056297e86f978c ] qc->result_tf contents are only valid when the ATA_QCFLAG_RTF_FILLED flag is set. The ATA_QCFLAG_RTF_FILLED flag should be always set for commands that failed or for commands that have the ATA_QCFLAG_RESULT_TF flag set. Reviewed-by: Hannes Reinecke Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Signed-off-by: Igor Pylypiv Link: https://lore.kernel.org/r/20240702024735.1152293-8-ipylypiv@google.com Signed-off-by: Niklas Cassel Signed-off-by: Sasha Levin --- drivers/ata/libata-scsi.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 4deee71006ef..4116ae088719 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -242,10 +242,17 @@ void ata_scsi_set_sense_information(struct ata_device *dev, */ static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc) { + struct ata_device *dev = qc->dev; struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; unsigned char *sb = cmd->sense_buffer; + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, + "missing result TF: can't set ATA PT sense fields\n"); + return; + } + if ((sb[0] & 0x7f) >= 0x72) { unsigned char *desc; u8 len; @@ -924,10 +931,17 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, */ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) { + struct ata_device *dev = qc->dev; struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; u8 sense_key, asc, ascq; + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, + "missing result TF: can't generate ATA PT sense data\n"); + return; + } + /* * Use ata_to_sense_error() to map status register bits * onto sense key, asc & ascq. 
@@ -979,6 +993,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21); return; } + + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, + "missing result TF: can't generate sense data\n"); + return; + } + /* Use ata_to_sense_error() to map status register bits * onto sense key, asc & ascq. */ From 0e3da92dbf6e30572e5ab2e1f00445b6470b41f3 Mon Sep 17 00:00:00 2001 From: Jia Jie Ho Date: Wed, 26 Jun 2024 09:40:42 +0800 Subject: [PATCH 327/374] crypto: starfive - Align rsa input data to 32-bit [ Upstream commit 6aad7019f697ab0bed98eba737d19bd7f67713de ] Hardware expects RSA input plain/ciphertext to be 32-bit aligned. Set fixed length for preallocated buffer to the maximum supported keysize of the hardware and shift input text accordingly. Signed-off-by: Jia Jie Ho Signed-off-by: Herbert Xu Stable-dep-of: 8323c036789b ("crypto: starfive - Fix nent assignment in rsa dec") Signed-off-by: Sasha Levin --- drivers/crypto/starfive/jh7110-cryp.h | 3 ++- drivers/crypto/starfive/jh7110-rsa.c | 12 ++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 494a74f52706..85c65c6c0327 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -30,6 +30,7 @@ #define MAX_KEY_SIZE SHA512_BLOCK_SIZE #define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE #define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE +#define STARFIVE_RSA_MAX_KEYSZ 256 union starfive_aes_csr { u32 v; @@ -222,7 +223,7 @@ struct starfive_cryp_request_ctx { unsigned int digsize; unsigned long in_sg_len; unsigned char *adata; - u8 rsa_data[] __aligned(sizeof(u32)); + u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32)); }; struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx); diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c index 33093ba4b13a..59f5979e9360 100644 --- a/drivers/crypto/starfive/jh7110-rsa.c +++ b/drivers/crypto/starfive/jh7110-rsa.c @@ -31,7 +31,6 @@ /* A * A * R mod N ==> A */ #define CRYPTO_CMD_AARN 0x7 -#define STARFIVE_RSA_MAX_KEYSZ 256 #define STARFIVE_RSA_RESET 0x2 static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) @@ -74,7 +73,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx, { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; - int count = rctx->total / sizeof(u32) - 1; + int count = (ALIGN(rctx->total, 4) / 4) - 1; int loop; u32 temp; u8 opsize; @@ -251,12 +250,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_rsa_key *key = &ctx->rsa_key; - int ret = 0; + int ret = 0, shift = 0; writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); + if (!IS_ALIGNED(rctx->total, sizeof(u32))) { + shift = sizeof(u32) - (rctx->total & 0x3); + memset(rctx->rsa_data, 0, shift); + } + rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents, - rctx->rsa_data, rctx->total); + rctx->rsa_data + shift, rctx->total); if (enc) { key->bitlen = key->e_bitlen; From 17c1e62c7ec2e08dbd679a316d99a86dac8efc7e Mon Sep 17 00:00:00 2001 From: Jia Jie Ho Date: Wed, 26 Jun 2024 09:40:43 +0800 Subject: [PATCH 328/374] crypto: starfive - Fix nent assignment in rsa dec [ Upstream commit 8323c036789b8b4a61925fce439a89dba17b7f2f ] Missing src scatterlist nent 
assignment in rsa decrypt function. Removing all unneeded assignment and use nents value from req->src instead. Signed-off-by: Jia Jie Ho Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/starfive/jh7110-cryp.h | 1 - drivers/crypto/starfive/jh7110-rsa.c | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 85c65c6c0327..5ed4ba5da7f9 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -218,7 +218,6 @@ struct starfive_cryp_request_ctx { struct scatterlist *out_sg; struct ahash_request ahash_fbk_req; size_t total; - size_t nents; unsigned int blksize; unsigned int digsize; unsigned long in_sg_len; diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c index 59f5979e9360..a778c4846025 100644 --- a/drivers/crypto/starfive/jh7110-rsa.c +++ b/drivers/crypto/starfive/jh7110-rsa.c @@ -259,7 +259,7 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) memset(rctx->rsa_data, 0, shift); } - rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents, + rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg), rctx->rsa_data + shift, rctx->total); if (enc) { @@ -309,7 +309,6 @@ static int starfive_rsa_enc(struct akcipher_request *req) rctx->in_sg = req->src; rctx->out_sg = req->dst; rctx->total = req->src_len; - rctx->nents = sg_nents(rctx->in_sg); ctx->rctx = rctx; return starfive_rsa_enc_core(ctx, 1); From f5ce0b408124e7a3870ef083d4a100116f1345c0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jul 2024 11:51:07 +0200 Subject: [PATCH 329/374] hid: bpf: add BPF_JIT dependency [ Upstream commit bacc15e010fc5a235fb2020b06a29a9961b5db82 ] The module does not do anything when the JIT is disabled, but instead causes a warning: In file included from include/linux/bpf_verifier.h:7, from drivers/hid/bpf/hid_bpf_struct_ops.c:10: drivers/hid/bpf/hid_bpf_struct_ops.c: In function 'hid_bpf_struct_ops_init': include/linux/bpf.h:1853:50: error: statement with no effect [-Werror=unused-value] 1853 | #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; }) | ^~~~~~~~~~~~~~~~ drivers/hid/bpf/hid_bpf_struct_ops.c:305:16: note: in expansion of macro 'register_bpf_struct_ops' 305 | return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops); | ^~~~~~~~~~~~~~~~~~~~~~~ Add a Kconfig dependency to only allow building the HID-BPF support when a JIT is enabled. Fixes: ebc0d8093e8c ("HID: bpf: implement HID-BPF through bpf_struct_ops") Signed-off-by: Arnd Bergmann Link: https://patch.msgid.link/96a00b6f-eb81-4c67-8c4b-6b1f3f045034@app.fastmail.com Signed-off-by: Benjamin Tissoires Signed-off-by: Sasha Levin --- drivers/hid/bpf/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/bpf/Kconfig b/drivers/hid/bpf/Kconfig index 83214bae6768..d65482e02a6c 100644 --- a/drivers/hid/bpf/Kconfig +++ b/drivers/hid/bpf/Kconfig @@ -3,7 +3,7 @@ menu "HID-BPF support" config HID_BPF bool "HID-BPF support" - depends on BPF + depends on BPF_JIT depends on BPF_SYSCALL depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS help From c26243db74c1ec2f609a954bfd6d188b1812ac62 Mon Sep 17 00:00:00 2001 From: Yoray Zack Date: Tue, 4 Jun 2024 00:22:17 +0300 Subject: [PATCH 330/374] net/mlx5e: SHAMPO, Use KSMs instead of KLMs [ Upstream commit 758191c9ea7bcc45dd99398a538ae4ab27c4029e ] KSM Mkey is KLM Mkey with a fixed buffer size. 
Due to this fact, it is a faster mechanism than KLM. SHAMPO feature used KLMs Mkeys for memory mappings of its headers buffer. As it used KLMs with the same buffer size for each entry, we can use KSMs instead. This commit changes the Mkeys that map the SHAMPO headers buffer from KLMs to KSMs. Signed-off-by: Yoray Zack Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240603212219.1037656-13-tariqt@nvidia.com Signed-off-by: Jakub Kicinski Stable-dep-of: f232de7cdb4b ("net/mlx5e: SHAMPO, Fix page leak") Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 20 +----- .../ethernet/mellanox/mlx5/core/en/params.c | 12 ++-- .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 19 ++++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 21 +++--- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 65 +++++++++---------- include/linux/mlx5/device.h | 1 + 6 files changed, 71 insertions(+), 67 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e85fb71bf0b4..3cebc3a435db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -80,6 +80,7 @@ struct page_pool; SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MLX5E_RX_MAX_HEAD (256) +#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) @@ -146,25 +147,6 @@ struct page_pool; #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ -#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ - (sizeof(struct mlx5e_umr_wqe) +\ - (sizeof(struct mlx5_klm) * (sgl_len))) - -#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) - -#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS)) - -#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ - (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) - -#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ - ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT) - -#define MLX5E_MAX_KLM_PER_WQE(mdev) \ - MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) - #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index ec819dfc98be..6c9ccccca81e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -1071,18 +1071,18 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest; + int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest; void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq); int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev); max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); - max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; - rest = max_hd_per_wqe % max_klm_per_umr; - wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe; + 
max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr; + rest = max_hd_per_wqe % max_ksm_per_umr; + wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe; if (rest) - wqebbs += MLX5E_KLM_UMR_WQEBBS(rest); + wqebbs += MLX5E_KSM_UMR_WQEBBS(rest); wqebbs *= wq_size; return wqebbs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 879d698b6119..d1f0f868d494 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -34,6 +34,25 @@ #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) +#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\ + (sizeof(struct mlx5e_umr_wqe) +\ + (sizeof(struct mlx5_ksm) * (sgl_len))) + +#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \ + (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB)) + +#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\ + (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS)) + +#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\ + (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm)) + +#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\ + ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT) + +#define MLX5E_MAX_KSM_PER_WQE(mdev) \ + MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) + static inline ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 409f525f1703..632129de24ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -504,8 +504,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, return err; } -static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, - u64 nentries, +static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev, + u64 nentries, u8 log_entry_size, u32 *umr_mkey) { int inlen; @@ -525,12 +525,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET(mkc, mkc, translations_octword_size, nentries); - MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, log_page_size, log_entry_size); + MLX5_SET64(mkc, mkc, len, nentries << log_entry_size); err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); kvfree(in); @@ -565,14 +566,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); - if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) { - mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", - max_klm_size, rq->mpwqe.shampo->hd_per_wq); + if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) { + mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", + max_ksm_size, rq->mpwqe.shampo->hd_per_wq); return -EINVAL; } - return 
mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + + return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE, &rq->mpwqe.shampo->mkey); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0138f77eaeed..2df96648e3f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -619,25 +619,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len, return min(len, count); } -static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, - __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) +static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, + __be32 key, u16 offset, u16 ksm_len) { - memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); + memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms)); umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); umr_wqe->ctrl.umr_mkey = key; umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) - | MLX5E_KLM_UMR_DS_CNT(klm_len)); + | MLX5E_KSM_UMR_DS_CNT(ksm_len)); umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); - umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); + umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len); umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - u16 klm_entries, u16 index) + u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; u16 entries, pi, header_offset, err, wqe_bbs, new_entries; @@ -650,20 +650,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, int headroom, i; headroom = rq->buff.headroom; - new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); + new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); + build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); frag_page = &shampo->pages[page_index]; for (i = 0; i < entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)) - goto update_klm; + if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < + MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) + goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -683,12 +683,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_klm: - umr_wqe->inline_klms[i].bcount = - cpu_to_be32(MLX5E_RX_MAX_HEAD); - umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); - umr_wqe->inline_klms[i].va = - cpu_to_be64(dma_info->addr + headroom); +update_ksm: + umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(dma_info->addr + headroom), + }; } sq->db.wqe_info[pi] = (struct 
mlx5e_icosq_wqe_info) { @@ -720,37 +719,37 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 klm_entries, num_wqe, index, entries_before; + u16 ksm_entries, num_wqe, index, entries_before; struct mlx5e_icosq *sq = rq->icosq; - int i, err, max_klm_entries, len; + int i, err, max_ksm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); - klm_entries = bitmap_find_window(shampo->bitmap, + max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); - if (!klm_entries) + if (!ksm_entries) return 0; - klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); + ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); entries_before = shampo->hd_per_wq - index; - if (unlikely(entries_before < klm_entries)) - num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + - DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); + if (unlikely(entries_before < ksm_entries)) + num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) + + DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries); else - num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); + num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries); for (i = 0; i < num_wqe; i++) { - len = (klm_entries > max_klm_entries) ? max_klm_entries : - klm_entries; + len = (ksm_entries > max_ksm_entries) ? max_ksm_entries : + ksm_entries; if (unlikely(index + len > shampo->hd_per_wq)) len = shampo->hd_per_wq - index; err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); if (unlikely(err)) return err; index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); - klm_entries -= len; + ksm_entries -= len; } return 0; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index d7bb31d9a446..da09bfaa7b81 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -294,6 +294,7 @@ enum { #define MLX5_UMR_FLEX_ALIGNMENT 0x40 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt)) #define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm)) +#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm)) #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) From e70f6ac7b761d261cffcbff60cf9f23cde56bd6b Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 15 Aug 2024 10:16:08 +0300 Subject: [PATCH 331/374] net/mlx5e: SHAMPO, Fix page leak [ Upstream commit f232de7cdb4b99adb2c7f2bc5e0b7e4e1292873b ] When SHAMPO is used, a receive queue currently almost always leaks one page on shutdown. A page has MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (8) headers. These headers are tracked in the SHAMPO bitmap. Each page is released when the last header index in the group is processed. During header allocation, there can be leftovers from a page that will be used in a subsequent allocation. 
This is normally fine, except for the following scenario (simplified a bit): 1) Allocate N new page fragments, showing only the relevant last 4 fragments: 0: new page 1: new page 2: new page 3: new page 4: page from previous allocation 5: page from previous allocation 6: page from previous allocation 7: page from previous allocation 2) NAPI processes header indices 4-7 because they are the oldest allocated. Bit 7 will be set to 0. 3) Receive queue shutdown occurs. All the remaining bits are being iterated on to release the pages. But the page assigned to header indices 0-3 will not be freed due to what happened in step 2. This patch fixes the issue by making sure that on allocation, header fragments are always allocated in groups of MLX5E_SHAMPO_WQ_HEADER_PER_PAGE so that there is never a partial page left over between allocations. A more appropriate fix would be a refactoring of mlx5e_alloc_rx_hd_mpwqe() and mlx5e_build_shampo_hd_umr(). But this refactoring is too big for net. It will be targeted for net-next. Fixes: e839ac9a89cb ("net/mlx5e: SHAMPO, Simplify header page release in teardown") Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20240815071611.2211873-2-tariqt@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 2df96648e3f4..cbc45dc34a60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -727,6 +727,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); + ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); if (!ksm_entries) return 0; From 2cb699051873a990d633a9eb18f35a94b8e29f45 Mon Sep 17 00:00:00 2001 From: Bommu Krishnaiah Date: Thu, 18 Apr 2024 16:45:34 +0530 Subject: [PATCH 332/374] drm/xe/xe2: Add workaround 14021402888 [ Upstream commit 598dc939edf8d7bb1d69e84513c31451812128fc ] This workaround applies to Graphics 20.01 as RCS engine workaround Signed-off-by: Bommu Krishnaiah Cc: Tejas Upadhyay Cc: Matt Roper Cc: Himal Prasad Ghimiray Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20240418111534.481568-1-krishnaiah.bommu@intel.com Signed-off-by: Lucas De Marchi Stable-dep-of: b196e6fcc711 ("drm/xe/xe2lpg: Extend workaround 14021402888") Signed-off-by: Sasha Levin --- drivers/gpu/drm/xe/regs/xe_gt_regs.h | 1 + drivers/gpu/drm/xe/xe_wa.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 94445810ccc9..23c302af4cd5 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -350,6 +350,7 @@ #define HALF_SLICE_CHICKEN7 XE_REG_MCR(0xe194, XE_REG_OPTION_MASKED) #define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) +#define CLEAR_OPTIMIZATION_DISABLE REG_BIT(6) #define CACHE_MODE_SS XE_REG_MCR(0xe420, XE_REG_OPTION_MASKED) #define DISABLE_ECC REG_BIT(5) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index dd214d95e4b6..303f18ce91e6 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -533,6 +533,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { FUNC(xe_rtp_match_first_render_or_compute)), 
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS)) }, + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, /* Xe2_HPM */ From 101c626098864e8774e3d88357b28e591176f7ed Mon Sep 17 00:00:00 2001 From: Bommu Krishnaiah Date: Wed, 3 Jul 2024 14:37:54 +0530 Subject: [PATCH 333/374] drm/xe/xe2lpg: Extend workaround 14021402888 [ Upstream commit b196e6fcc71186134b4cfe756067d87ae41b1ed9 ] workaround 14021402888 also applies to Xe2_LPG. Replicate the existing entry to one specific for Xe2_LPG. Signed-off-by: Bommu Krishnaiah Cc: Tejas Upadhyay Cc: Matt Roper Cc: Himal Prasad Ghimiray Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240703090754.1323647-1-krishnaiah.bommu@intel.com Signed-off-by: Lucas De Marchi (cherry picked from commit 56ab6986992ba143aee0bda33e15a764343e271d) Signed-off-by: Rodrigo Vivi Signed-off-by: Sasha Levin --- drivers/gpu/drm/xe/xe_wa.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 303f18ce91e6..66dafe980b9c 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -485,6 +485,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE)) }, + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, /* Xe2_HPG */ From 7b7a9dac38785647db707cd69730462cbd653cd3 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Thu, 1 Aug 2024 13:21:07 +0300 Subject: [PATCH 334/374] clk: qcom: gcc-x1e80100: Fix USB 0 and 1 PHY GDSC pwrsts flags [ Upstream commit f4c16a7cdbd2edecdb854f2ce0ef07c6263c5379 ] Allowing these GDSCs to collapse makes the QMP combo PHYs lose their configuration on machine suspend. Currently, the QMP combo PHY driver doesn't reinitialise the HW on resume. Under such conditions, the USB SuperSpeed support is broken. To avoid this, mark the pwrsts flags with RET_ON. This is in line with USB 2 PHY GDSC config. Fixes: 161b7c401f4b ("clk: qcom: Add Global Clock controller (GCC) driver for X1E80100") Signed-off-by: Abel Vesa Link: https://lore.kernel.org/r/20240801-x1e80100-clk-gcc-fix-usb-phy-gdscs-pwrsts-v1-1-8df016768a0f@linaro.org Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- drivers/clk/qcom/gcc-x1e80100.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index a263f0c412f5..24f84c6705e5 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -6203,7 +6203,7 @@ static struct gdsc gcc_usb_0_phy_gdsc = { .pd = { .name = "gcc_usb_0_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; @@ -6215,7 +6215,7 @@ static struct gdsc gcc_usb_1_phy_gdsc = { .pd = { .name = "gcc_usb_1_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; From 70eee39bbd0501ad7cb38f3561b8fc0533df1854 Mon Sep 17 00:00:00 2001 From: devi priya Date: Tue, 6 Aug 2024 11:41:05 +0530 Subject: [PATCH 335/374] clk: qcom: ipq9574: Update the alpha PLL type for GPLLs [ Upstream commit 6357efe3abead68048729adf11a9363881657939 ] Update PLL offsets to DEFAULT_EVO to configure MDIO to 800MHz. 
The incorrect clock frequency leads to an incorrect MDIO clock. This, in turn, affects the MDIO hardware configurations as the divider is calculated from the MDIO clock frequency. If the clock frequency is not as expected, the MDIO register fails due to the generation of an incorrect MDIO frequency. This issue is critical as it results in incorrect MDIO configurations and ultimately leads to the MDIO function not working. This results in a complete feature failure affecting all Ethernet PHYs. Specifically, Ethernet will not work on IPQ9574 due to this issue. Currently, the clock frequency is set to CLK_ALPHA_PLL_TYPE_DEFAULT. However, this setting does not yield the expected clock frequency. To rectify this, we need to change this to CLK_ALPHA_PLL_TYPE_DEFAULT_EVO. This modification ensures that the clock frequency aligns with our expectations, thereby resolving the MDIO register failure and ensuring the proper functioning of the Ethernet on IPQ9574. Fixes: d75b82cff488 ("clk: qcom: Add Global Clock Controller driver for IPQ9574") Signed-off-by: devi priya Signed-off-by: Amandeep Singh Link: https://lore.kernel.org/r/20240806061105.2849944-1-quic_amansing@quicinc.com Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- drivers/clk/qcom/gcc-ipq9574.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c index f8b9a1e93bef..cdbbf2cc9c5d 100644 --- a/drivers/clk/qcom/gcc-ipq9574.c +++ b/drivers/clk/qcom/gcc-ipq9574.c @@ -65,7 +65,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = { static struct clk_alpha_pll gpll0_main = { .offset = 0x20000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .clkr = { .enable_reg = 0x0b000, .enable_mask = BIT(0), @@ -93,7 +93,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = { static struct clk_alpha_pll_postdiv gpll0 = { .offset = 0x20000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "gpll0", @@ -107,7 +107,7 @@ static struct clk_alpha_pll_postdiv gpll0 = { static struct clk_alpha_pll gpll4_main = { .offset = 0x22000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .clkr = { .enable_reg = 0x0b000, .enable_mask = BIT(2), @@ -122,7 +122,7 @@ static struct clk_alpha_pll gpll4_main = { static struct clk_alpha_pll_postdiv gpll4 = { .offset = 0x22000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "gpll4", @@ -136,7 +136,7 @@ static struct clk_alpha_pll_postdiv gpll4 = { static struct clk_alpha_pll gpll2_main = { .offset = 0x21000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .clkr = { .enable_reg = 0x0b000, .enable_mask = BIT(1), @@ -151,7 +151,7 @@ static struct clk_alpha_pll gpll2_main = { static struct clk_alpha_pll_postdiv gpll2 = { .offset = 0x21000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "gpll2", From 4429f9807e6303f2bf5ace699f33173fe5c958f4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 2 
Jul 2024 15:51:13 +0200 Subject: [PATCH 336/374] powerpc/64e: remove unused IBM HTW code [ Upstream commit 88715b6e5d529f4ef3830ad2a893e4624c6af0b8 ] Patch series "Reimplement huge pages without hugepd on powerpc (8xx, e500, book3s/64)", v7. Unlike most architectures, powerpc 8xx HW requires a two-level pagetable topology for all page sizes. So a leaf PMD-contig approach is not feasible as such. Possible sizes on 8xx are 4k, 16k, 512k and 8M. First level (PGD/PMD) covers 4M per entry. For 8M pages, two PMD entries must point to a single entry level-2 page table. Until now that was done using hugepd. This series changes it to use standard page tables where the entry is replicated 1024 times on each of the two pagetables referred to by the two associated PMD entries for that 8M page. For e500 and book3s/64 there are fewer constraints because it is not tied to the HW assisted tablewalk like on 8xx, so it is easier to use leaf PMDs (and PUDs). On e500 the supported page sizes are 4M, 16M, 64M, 256M and 1G. All at PMD level on e500/32 (mpc85xx) and a mix of PMD and PUD for e500/64. We encode page size with 4 available bits in PTE entries. On e300/32 the PGD entry size is increased to 64 bits in order to allow leaf-PMD entries, because PTEs are 64 bits on e500. On book3s/64 only the hash-4k mode is concerned. It supports 16M pages as cont-PMD and 16G pages as cont-PUD. In other modes (radix-4k, radix-64k and hash-64k) the sizes match with PMD and PUD sizes so that's just leaf entries. The hash processing makes things a bit more complex. To ease things, __hash_page_huge() is modified to bail out when DIRTY or ACCESSED bits are missing, leaving it to the mm core to fix it. This patch (of 23): The nohash HTW_IBM (Hardware Table Walk) code is unused since support for A2 was removed in commit fb5a515704d7 ("powerpc: Remove platforms/wsp and associated pieces") (2014). The remaining supported CPUs use either no HTW (data_tlb_miss_bolted), or the e6500 HTW (data_tlb_miss_e6500). Link: https://lkml.kernel.org/r/cover.1719928057.git.christophe.leroy@csgroup.eu Link: https://lkml.kernel.org/r/820dd1385ecc931f07b0d7a0fa827b1613917ab6.1719928057.git.christophe.leroy@csgroup.eu Signed-off-by: Michael Ellerman Signed-off-by: Christophe Leroy Cc: Jason Gunthorpe Cc: Nicholas Piggin Cc: Oscar Salvador Cc: Peter Xu Signed-off-by: Andrew Morton Stable-dep-of: d92b5cc29c79 ("powerpc/64e: Define mmu_pte_psize static") Signed-off-by: Sasha Levin --- arch/powerpc/include/asm/nohash/mmu-e500.h | 3 +- arch/powerpc/mm/nohash/tlb.c | 57 +----- arch/powerpc/mm/nohash/tlb_low_64e.S | 195 --------------------- 3 files changed, 2 insertions(+), 253 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h index 6ddced0415cb..7dc24b8632d7 100644 --- a/arch/powerpc/include/asm/nohash/mmu-e500.h +++ b/arch/powerpc/include/asm/nohash/mmu-e500.h @@ -303,8 +303,7 @@ extern unsigned long linear_map_top; extern int book3e_htw_mode; #define PPC_HTW_NONE 0 -#define PPC_HTW_IBM 1 -#define PPC_HTW_E6500 2 +#define PPC_HTW_E6500 1 /* * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 5ffa0af4328a..a5bb87ec8578 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -400,9 +400,8 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) static void __init setup_page_sizes(void) { unsigned int tlb0cfg; - unsigned int tlb0ps; unsigned int eptcfg; - int i, psize; + int psize; #ifdef CONFIG_PPC_E500 unsigned int mmucfg = mfspr(SPRN_MMUCFG); @@ -471,50 +470,6 @@ static void __init setup_page_sizes(void) goto out; } #endif - - tlb0cfg = mfspr(SPRN_TLB0CFG); - tlb0ps = mfspr(SPRN_TLB0PS); - eptcfg = mfspr(SPRN_EPTCFG); - - /* Look for supported direct sizes */ - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (tlb0ps & (1U << (def->shift - 10))) - def->flags |= MMU_PAGE_SIZE_DIRECT; - } - - /* Indirect page sizes supported ? */ - if ((tlb0cfg & TLBnCFG_IND) == 0 || - (tlb0cfg & TLBnCFG_PT) == 0) - goto out; - - book3e_htw_mode = PPC_HTW_IBM; - - /* Now, we only deal with one IND page size for each - * direct size. Hopefully all implementations today are - * unambiguous, but we might want to be careful in the - * future. - */ - for (i = 0; i < 3; i++) { - unsigned int ps, sps; - - sps = eptcfg & 0x1f; - eptcfg >>= 5; - ps = eptcfg & 0x1f; - eptcfg >>= 5; - if (!ps || !sps) - continue; - for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (ps == (def->shift - 10)) - def->flags |= MMU_PAGE_SIZE_INDIRECT; - if (sps == (def->shift - 10)) - def->ind = ps + 10; - } - } - out: /* Cleanup array and print summary */ pr_info("MMU: Supported page sizes\n"); @@ -543,10 +498,6 @@ static void __init setup_mmu_htw(void) */ switch (book3e_htw_mode) { - case PPC_HTW_IBM: - patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); - patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); - break; #ifdef CONFIG_PPC_E500 case PPC_HTW_E6500: extlb_level_exc = EX_TLB_SIZE; @@ -577,12 +528,6 @@ static void early_init_this_mmu(void) mmu_pte_psize = MMU_PAGE_2M; break; - case PPC_HTW_IBM: - mas4 |= MAS4_INDD; - mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; - mmu_pte_psize = MMU_PAGE_1M; - break; - case PPC_HTW_NONE: mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; mmu_pte_psize = mmu_virtual_psize; diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index 7e0b8fe1c279..b0eb3f7eaed1 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault: TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e - -/************************************************************** - * * - * TLB miss handling for Book3E with hw page table support * - * * - **************************************************************/ - - -/* Data TLB miss */ - START_EXCEPTION(data_tlb_miss_htw) - TLB_MISS_PROLOG - - /* Now we handle the fault proper. We only save DEAR in normal - * fault case since that's the only interesting values here. - * We could probably also optimize by not saving SRR0/1 in the - * linear mapping case but I'll leave that for later - */ - mfspr r14,SPRN_ESR - mfspr r16,SPRN_DEAR /* get faulting address */ - srdi r11,r16,44 /* get region */ - xoris r11,r11,0xc - cmpldi cr0,r11,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r11,1 /* vmalloc mapping ? 
*/ - - /* We do the user/kernel test for the PID here along with the RW test - */ - srdi. r11,r16,60 /* Check for user region */ - ld r15,PACAPGD(r13) /* Load user pgdir */ - beq htw_tlb_miss - - /* XXX replace the RMW cycles with immediate loads + writes */ -1: mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ - beq+ cr1,htw_tlb_miss - - /* We got a crappy address, just fault with whatever DEAR and ESR - * are here - */ - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e - -/* Instruction TLB miss */ - START_EXCEPTION(instruction_tlb_miss_htw) - TLB_MISS_PROLOG - - /* If we take a recursive fault, the second level handler may need - * to know whether we are handling a data or instruction fault in - * order to get to the right store fault handler. We provide that - * info by keeping a crazy value for ESR in r14 - */ - li r14,-1 /* store to exception frame is done later */ - - /* Now we handle the fault proper. We only save DEAR in the non - * linear mapping case since we know the linear mapping case will - * not re-enter. We could indeed optimize and also not save SRR0/1 - * in the linear mapping case but I'll leave that for later - * - * Faulting address is SRR0 which is already in r16 - */ - srdi r11,r16,44 /* get region */ - xoris r11,r11,0xc - cmpldi cr0,r11,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r11,1 /* vmalloc mapping ? */ - - /* We do the user/kernel test for the PID here along with the RW test - */ - srdi. r11,r16,60 /* Check for user region */ - ld r15,PACAPGD(r13) /* Load user pgdir */ - beq htw_tlb_miss - - /* XXX replace the RMW cycles with immediate loads + writes */ -1: mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ - beq+ htw_tlb_miss - - /* We got a crappy address, just fault */ - TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - - -/* - * This is the guts of the second-level TLB miss handler for direct - * misses. We are entered with: - * - * r16 = virtual page table faulting address - * r15 = PGD pointer - * r14 = ESR - * r13 = PACA - * r12 = TLB exception frame in PACA - * r11 = crap (free to use) - * r10 = crap (free to use) - * - * It can be re-entered by the linear mapping miss handler. However, to - * avoid too much complication, it will save/restore things for us - */ -htw_tlb_miss: -#ifdef CONFIG_PPC_KUAP - mfspr r10,SPRN_MAS1 - rlwinm. r10,r10,0,0x3fff0000 - beq- htw_tlb_miss_fault /* KUAP fault */ -#endif - /* Search if we already have a TLB entry for that virtual address, and - * if we do, bail out. - * - * MAS1:IND should be already set based on MAS4 - */ - PPC_TLBSRX_DOT(0,R16) - beq htw_tlb_miss_done - - /* Now, we need to walk the page tables. First check if we are in - * range. - */ - rldicl. 
r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 - bne- htw_tlb_miss_fault - - /* Get the PGD pointer */ - cmpldi cr0,r15,0 - beq- htw_tlb_miss_fault - - /* Get to PGD entry */ - rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Get to PUD entry */ - rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Get to PMD entry */ - rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Ok, we're all right, we can now create an indirect entry for - * a 1M or 256M page. - * - * The last trick is now that because we use "half" pages for - * the HTW (1M IND is 2K and 256M IND is 32K) we need to account - * for an added LSB bit to the RPN. For 64K pages, there is no - * problem as we already use 32K arrays (half PTE pages), but for - * 4K page we need to extract a bit from the virtual address and - * insert it into the "PA52" bit of the RPN. - */ - rlwimi r15,r16,32-9,20,20 - /* Now we build the MAS: - * - * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG - * MAS 1 : Almost fully setup - * - PID already updated by caller if necessary - * - TSIZE for now is base ind page size always - * MAS 2 : Use defaults - * MAS 3+7 : Needs to be done - */ - ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) - - srdi r16,r10,32 - mtspr SPRN_MAS3,r10 - mtspr SPRN_MAS7,r16 - - tlbwe - -htw_tlb_miss_done: - /* We don't bother with restoring DEAR or ESR since we know we are - * level 0 and just going back to userland. They are only needed - * if you are going to take an access fault - */ - TLB_MISS_EPILOG_SUCCESS - rfi - -htw_tlb_miss_fault: - /* We need to check if it was an instruction miss. We know this - * though because r14 would contain -1 - */ - cmpdi cr0,r14,-1 - beq 1f - mtspr SPRN_DEAR,r16 - mtspr SPRN_ESR,r14 - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e -1: TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - /* * This is the guts of "any" level TLB miss handler for kernel linear * mapping misses. We are entered with: From 7ebaff701efe21922669bb2cd093ecda3da4e6e6 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 2 Jul 2024 15:51:14 +0200 Subject: [PATCH 337/374] powerpc/64e: split out nohash Book3E 64-bit code [ Upstream commit a898530eea3d0ba08c17a60865995a3bb468d1bc ] A reasonable chunk of nohash/tlb.c is 64-bit only code, split it out into a separate file. 
Link: https://lkml.kernel.org/r/cb2b118f9d8a86f82d01bfb9ad309d1d304480a1.1719928057.git.christophe.leroy@csgroup.eu Signed-off-by: Michael Ellerman Signed-off-by: Christophe Leroy Cc: Jason Gunthorpe Cc: Nicholas Piggin Cc: Oscar Salvador Cc: Peter Xu Signed-off-by: Andrew Morton Stable-dep-of: d92b5cc29c79 ("powerpc/64e: Define mmu_pte_psize static") Signed-off-by: Sasha Levin --- arch/powerpc/mm/nohash/Makefile | 2 +- arch/powerpc/mm/nohash/tlb.c | 343 +---------------------------- arch/powerpc/mm/nohash/tlb_64e.c | 361 +++++++++++++++++++++++++++++++ 3 files changed, 363 insertions(+), 343 deletions(-) create mode 100644 arch/powerpc/mm/nohash/tlb_64e.c diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile index b3f0498dd42f..90e846f0c46c 100644 --- a/arch/powerpc/mm/nohash/Makefile +++ b/arch/powerpc/mm/nohash/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += mmu_context.o tlb.o tlb_low.o kup.o -obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o +obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o obj-$(CONFIG_40x) += 40x.o obj-$(CONFIG_44x) += 44x.o obj-$(CONFIG_PPC_8xx) += 8xx.o diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index a5bb87ec8578..f57dc721d063 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { }; #endif -/* The variables below are currently only used on 64-bit Book3E - * though this will probably be made common with other nohash - * implementations at some point - */ -#ifdef CONFIG_PPC64 - -int mmu_pte_psize; /* Page size used for PTE pages */ -int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ -int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ -unsigned long linear_map_top; /* Top of linear mapping */ - - -/* - * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug - * exceptions. This is used for bolted and e6500 TLB miss handlers which - * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, - * this is set to zero. 
- */ -int extlb_level_exc; - -#endif /* CONFIG_PPC64 */ - #ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); @@ -358,326 +336,7 @@ void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm(tlb->mm); } -/* - * Below are functions specific to the 64-bit variant of Book3E though that - * may change in the future - */ - -#ifdef CONFIG_PPC64 - -/* - * Handling of virtual linear page tables or indirect TLB entries - * flushing when PTE pages are freed - */ -void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) -{ - int tsize = mmu_psize_defs[mmu_pte_psize].enc; - - if (book3e_htw_mode != PPC_HTW_NONE) { - unsigned long start = address & PMD_MASK; - unsigned long end = address + PMD_SIZE; - unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; - - /* This isn't the most optimal, ideally we would factor out the - * while preempt & CPU mask mucking around, or even the IPI but - * it will do for now - */ - while (start < end) { - __flush_tlb_page(tlb->mm, start, tsize, 1); - start += size; - } - } else { - unsigned long rmask = 0xf000000000000000ul; - unsigned long rid = (address & rmask) | 0x1000000000000000ul; - unsigned long vpte = address & ~rmask; - - vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; - vpte |= rid; - __flush_tlb_page(tlb->mm, vpte, tsize, 0); - } -} - -static void __init setup_page_sizes(void) -{ - unsigned int tlb0cfg; - unsigned int eptcfg; - int psize; - -#ifdef CONFIG_PPC_E500 - unsigned int mmucfg = mfspr(SPRN_MMUCFG); - int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); - - if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { - unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); - unsigned int min_pg, max_pg; - - min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; - max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; - - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def; - unsigned int shift; - - def = &mmu_psize_defs[psize]; - shift = def->shift; - - if (shift == 0 || shift & 1) - continue; - - /* adjust to be in terms of 4^shift Kb */ - shift = (shift - 10) >> 1; - - if ((shift >= min_pg) && (shift <= max_pg)) - def->flags |= MMU_PAGE_SIZE_DIRECT; - } - - goto out; - } - - if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { - u32 tlb1cfg, tlb1ps; - - tlb0cfg = mfspr(SPRN_TLB0CFG); - tlb1cfg = mfspr(SPRN_TLB1CFG); - tlb1ps = mfspr(SPRN_TLB1PS); - eptcfg = mfspr(SPRN_EPTCFG); - - if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) - book3e_htw_mode = PPC_HTW_E6500; - - /* - * We expect 4K subpage size and unrestricted indirect size. - * The lack of a restriction on indirect size is a Freescale - * extension, indicated by PSn = 0 but SPSn != 0. 
- */ - if (eptcfg != 2) - book3e_htw_mode = PPC_HTW_NONE; - - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (!def->shift) - continue; - - if (tlb1ps & (1U << (def->shift - 10))) { - def->flags |= MMU_PAGE_SIZE_DIRECT; - - if (book3e_htw_mode && psize == MMU_PAGE_2M) - def->flags |= MMU_PAGE_SIZE_INDIRECT; - } - } - - goto out; - } -#endif -out: - /* Cleanup array and print summary */ - pr_info("MMU: Supported page sizes\n"); - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - const char *__page_type_names[] = { - "unsupported", - "direct", - "indirect", - "direct & indirect" - }; - if (def->flags == 0) { - def->shift = 0; - continue; - } - pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), - __page_type_names[def->flags & 0x3]); - } -} - -static void __init setup_mmu_htw(void) -{ - /* - * If we want to use HW tablewalk, enable it by patching the TLB miss - * handlers to branch to the one dedicated to it. - */ - - switch (book3e_htw_mode) { -#ifdef CONFIG_PPC_E500 - case PPC_HTW_E6500: - extlb_level_exc = EX_TLB_SIZE; - patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); - patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); - break; -#endif - } - pr_info("MMU: Book3E HW tablewalk %s\n", - book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); -} - -/* - * Early initialization of the MMU TLB code - */ -static void early_init_this_mmu(void) -{ - unsigned int mas4; - - /* Set MAS4 based on page table setting */ - - mas4 = 0x4 << MAS4_WIMGED_SHIFT; - switch (book3e_htw_mode) { - case PPC_HTW_E6500: - mas4 |= MAS4_INDD; - mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; - mas4 |= MAS4_TLBSELD(1); - mmu_pte_psize = MMU_PAGE_2M; - break; - - case PPC_HTW_NONE: - mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; - mmu_pte_psize = mmu_virtual_psize; - break; - } - mtspr(SPRN_MAS4, mas4); - -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - unsigned int num_cams; - bool map = true; - - /* use a quarter of the TLBCAM for bolted linear map */ - num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; - - /* - * Only do the mapping once per core, or else the - * transient mapping would cause problems. - */ -#ifdef CONFIG_SMP - if (hweight32(get_tensr()) > 1) - map = false; -#endif - - if (map) - linear_map_top = map_mem_in_cams(linear_map_top, - num_cams, false, true); - } -#endif - - /* A sync won't hurt us after mucking around with - * the MMU configuration - */ - mb(); -} - -static void __init early_init_mmu_global(void) -{ - /* XXX This should be decided at runtime based on supported - * page sizes in the TLB, but for now let's assume 16M is - * always there and a good fit (which it probably is) - * - * Freescale booke only supports 4K pages in TLB0, so use that. - */ - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) - mmu_vmemmap_psize = MMU_PAGE_4K; - else - mmu_vmemmap_psize = MMU_PAGE_16M; - - /* XXX This code only checks for TLB 0 capabilities and doesn't - * check what page size combos are supported by the HW. It - * also doesn't handle the case where a separate array holds - * the IND entries from the array loaded by the PT. 
- */ - /* Look for supported page sizes */ - setup_page_sizes(); - - /* Look for HW tablewalk support */ - setup_mmu_htw(); - -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - if (book3e_htw_mode == PPC_HTW_NONE) { - extlb_level_exc = EX_TLB_SIZE; - patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); - patch_exception(0x1e0, - exc_instruction_tlb_miss_bolted_book3e); - } - } -#endif - - /* Set the global containing the top of the linear mapping - * for use by the TLB miss code - */ - linear_map_top = memblock_end_of_DRAM(); - - ioremap_bot = IOREMAP_BASE; -} - -static void __init early_mmu_set_memory_limit(void) -{ -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - /* - * Limit memory so we dont have linear faults. - * Unlike memblock_set_current_limit, which limits - * memory available during early boot, this permanently - * reduces the memory available to Linux. We need to - * do this because highmem is not supported on 64-bit. - */ - memblock_enforce_memory_limit(linear_map_top); - } -#endif - - memblock_set_current_limit(linear_map_top); -} - -/* boot cpu only */ -void __init early_init_mmu(void) -{ - early_init_mmu_global(); - early_init_this_mmu(); - early_mmu_set_memory_limit(); -} - -void early_init_mmu_secondary(void) -{ - early_init_this_mmu(); -} - -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* On non-FSL Embedded 64-bit, we adjust the RMA size to match - * the bolted TLB entry. We know for now that only 1G - * entries are supported though that may eventually - * change. - * - * on FSL Embedded 64-bit, usually all RAM is bolted, but with - * unusual memory sizes it's possible for some RAM to not be mapped - * (such RAM is not used at all by Linux, since we don't support - * highmem on 64-bit). We limit ppc64_rma_size to what would be - * mappable if this memblock is the only one. Additional memblocks - * can only increase, not decrease, the amount that ends up getting - * mapped. We still limit max to 1G even if we'll eventually map - * more. This is due to what the early init code is set up to do. - * - * We crop it to the size of the first MEMBLOCK to - * avoid going over total available memory just in case... - */ -#ifdef CONFIG_PPC_E500 - if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - unsigned long linear_sz; - unsigned int num_cams; - - /* use a quarter of the TLBCAM for bolted linear map */ - num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; - - linear_sz = map_mem_in_cams(first_memblock_size, num_cams, - true, true); - - ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); - } else -#endif - ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); - - /* Finally limit subsequent allocations */ - memblock_set_current_limit(first_memblock_base + ppc64_rma_size); -} -#else /* ! CONFIG_PPC64 */ +#ifndef CONFIG_PPC64 void __init early_init_mmu(void) { unsigned long root = of_get_flat_dt_root(); diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c new file mode 100644 index 000000000000..1dcda261554c --- /dev/null +++ b/arch/powerpc/mm/nohash/tlb_64e.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2008,2009 Ben Herrenschmidt + * IBM Corp. 
+ * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +/* The variables below are currently only used on 64-bit Book3E + * though this will probably be made common with other nohash + * implementations at some point + */ +int mmu_pte_psize; /* Page size used for PTE pages */ +int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ +int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ +unsigned long linear_map_top; /* Top of linear mapping */ + + +/* + * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug + * exceptions. This is used for bolted and e6500 TLB miss handlers which + * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, + * this is set to zero. + */ +int extlb_level_exc; + +/* + * Handling of virtual linear page tables or indirect TLB entries + * flushing when PTE pages are freed + */ +void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) +{ + int tsize = mmu_psize_defs[mmu_pte_psize].enc; + + if (book3e_htw_mode != PPC_HTW_NONE) { + unsigned long start = address & PMD_MASK; + unsigned long end = address + PMD_SIZE; + unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; + + /* This isn't the most optimal, ideally we would factor out the + * while preempt & CPU mask mucking around, or even the IPI but + * it will do for now + */ + while (start < end) { + __flush_tlb_page(tlb->mm, start, tsize, 1); + start += size; + } + } else { + unsigned long rmask = 0xf000000000000000ul; + unsigned long rid = (address & rmask) | 0x1000000000000000ul; + unsigned long vpte = address & ~rmask; + + vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; + vpte |= rid; + __flush_tlb_page(tlb->mm, vpte, tsize, 0); + } +} + +static void __init setup_page_sizes(void) +{ + unsigned int tlb0cfg; + unsigned int eptcfg; + int psize; + +#ifdef CONFIG_PPC_E500 + unsigned int mmucfg = mfspr(SPRN_MMUCFG); + int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); + + if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { + unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); + unsigned int min_pg, max_pg; + + min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; + max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def; + unsigned int shift; + + def = &mmu_psize_defs[psize]; + shift = def->shift; + + if (shift == 0 || shift & 1) + continue; + + /* adjust to be in terms of 4^shift Kb */ + shift = (shift - 10) >> 1; + + if ((shift >= min_pg) && (shift <= max_pg)) + def->flags |= MMU_PAGE_SIZE_DIRECT; + } + + goto out; + } + + if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { + u32 tlb1cfg, tlb1ps; + + tlb0cfg = mfspr(SPRN_TLB0CFG); + tlb1cfg = mfspr(SPRN_TLB1CFG); + tlb1ps = mfspr(SPRN_TLB1PS); + eptcfg = mfspr(SPRN_EPTCFG); + + if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) + book3e_htw_mode = PPC_HTW_E6500; + + /* + * We expect 4K subpage size and unrestricted indirect size. + * The lack of a restriction on indirect size is a Freescale + * extension, indicated by PSn = 0 but SPSn != 0. 
+ */ + if (eptcfg != 2) + book3e_htw_mode = PPC_HTW_NONE; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def = &mmu_psize_defs[psize]; + + if (!def->shift) + continue; + + if (tlb1ps & (1U << (def->shift - 10))) { + def->flags |= MMU_PAGE_SIZE_DIRECT; + + if (book3e_htw_mode && psize == MMU_PAGE_2M) + def->flags |= MMU_PAGE_SIZE_INDIRECT; + } + } + + goto out; + } +#endif +out: + /* Cleanup array and print summary */ + pr_info("MMU: Supported page sizes\n"); + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def = &mmu_psize_defs[psize]; + const char *__page_type_names[] = { + "unsupported", + "direct", + "indirect", + "direct & indirect" + }; + if (def->flags == 0) { + def->shift = 0; + continue; + } + pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), + __page_type_names[def->flags & 0x3]); + } +} + +static void __init setup_mmu_htw(void) +{ + /* + * If we want to use HW tablewalk, enable it by patching the TLB miss + * handlers to branch to the one dedicated to it. + */ + + switch (book3e_htw_mode) { +#ifdef CONFIG_PPC_E500 + case PPC_HTW_E6500: + extlb_level_exc = EX_TLB_SIZE; + patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); + patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); + break; +#endif + } + pr_info("MMU: Book3E HW tablewalk %s\n", + book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); +} + +/* + * Early initialization of the MMU TLB code + */ +static void early_init_this_mmu(void) +{ + unsigned int mas4; + + /* Set MAS4 based on page table setting */ + + mas4 = 0x4 << MAS4_WIMGED_SHIFT; + switch (book3e_htw_mode) { + case PPC_HTW_E6500: + mas4 |= MAS4_INDD; + mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; + mas4 |= MAS4_TLBSELD(1); + mmu_pte_psize = MMU_PAGE_2M; + break; + + case PPC_HTW_NONE: + mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; + mmu_pte_psize = mmu_virtual_psize; + break; + } + mtspr(SPRN_MAS4, mas4); + +#ifdef CONFIG_PPC_E500 + if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { + unsigned int num_cams; + bool map = true; + + /* use a quarter of the TLBCAM for bolted linear map */ + num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; + + /* + * Only do the mapping once per core, or else the + * transient mapping would cause problems. + */ +#ifdef CONFIG_SMP + if (hweight32(get_tensr()) > 1) + map = false; +#endif + + if (map) + linear_map_top = map_mem_in_cams(linear_map_top, + num_cams, false, true); + } +#endif + + /* A sync won't hurt us after mucking around with + * the MMU configuration + */ + mb(); +} + +static void __init early_init_mmu_global(void) +{ + /* XXX This should be decided at runtime based on supported + * page sizes in the TLB, but for now let's assume 16M is + * always there and a good fit (which it probably is) + * + * Freescale booke only supports 4K pages in TLB0, so use that. + */ + if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) + mmu_vmemmap_psize = MMU_PAGE_4K; + else + mmu_vmemmap_psize = MMU_PAGE_16M; + + /* XXX This code only checks for TLB 0 capabilities and doesn't + * check what page size combos are supported by the HW. It + * also doesn't handle the case where a separate array holds + * the IND entries from the array loaded by the PT. 
+ */ + /* Look for supported page sizes */ + setup_page_sizes(); + + /* Look for HW tablewalk support */ + setup_mmu_htw(); + +#ifdef CONFIG_PPC_E500 + if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { + if (book3e_htw_mode == PPC_HTW_NONE) { + extlb_level_exc = EX_TLB_SIZE; + patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); + patch_exception(0x1e0, + exc_instruction_tlb_miss_bolted_book3e); + } + } +#endif + + /* Set the global containing the top of the linear mapping + * for use by the TLB miss code + */ + linear_map_top = memblock_end_of_DRAM(); + + ioremap_bot = IOREMAP_BASE; +} + +static void __init early_mmu_set_memory_limit(void) +{ +#ifdef CONFIG_PPC_E500 + if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { + /* + * Limit memory so we dont have linear faults. + * Unlike memblock_set_current_limit, which limits + * memory available during early boot, this permanently + * reduces the memory available to Linux. We need to + * do this because highmem is not supported on 64-bit. + */ + memblock_enforce_memory_limit(linear_map_top); + } +#endif + + memblock_set_current_limit(linear_map_top); +} + +/* boot cpu only */ +void __init early_init_mmu(void) +{ + early_init_mmu_global(); + early_init_this_mmu(); + early_mmu_set_memory_limit(); +} + +void early_init_mmu_secondary(void) +{ + early_init_this_mmu(); +} + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* On non-FSL Embedded 64-bit, we adjust the RMA size to match + * the bolted TLB entry. We know for now that only 1G + * entries are supported though that may eventually + * change. + * + * on FSL Embedded 64-bit, usually all RAM is bolted, but with + * unusual memory sizes it's possible for some RAM to not be mapped + * (such RAM is not used at all by Linux, since we don't support + * highmem on 64-bit). We limit ppc64_rma_size to what would be + * mappable if this memblock is the only one. Additional memblocks + * can only increase, not decrease, the amount that ends up getting + * mapped. We still limit max to 1G even if we'll eventually map + * more. This is due to what the early init code is set up to do. + * + * We crop it to the size of the first MEMBLOCK to + * avoid going over total available memory just in case... + */ +#ifdef CONFIG_PPC_E500 + if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { + unsigned long linear_sz; + unsigned int num_cams; + + /* use a quarter of the TLBCAM for bolted linear map */ + num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; + + linear_sz = map_mem_in_cams(first_memblock_size, num_cams, + true, true); + + ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); + } else +#endif + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(first_memblock_base + ppc64_rma_size); +} From 1e08132bc7c7e449d1b0348b453c67f611debb56 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 20 Aug 2024 14:42:38 +0200 Subject: [PATCH 338/374] powerpc/64e: Define mmu_pte_psize static [ Upstream commit d92b5cc29c792f1d3f0aaa3b29dddfe816c03e88 ] mmu_pte_psize is only used in the tlb_64e.c, define it static. 
Fixes: 25d21ad6e799 ("powerpc: Add TLB management code for 64-bit Book3E") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202408011256.1O99IB0s-lkp@intel.com/ Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://msgid.link/beb30d280eaa5d857c38a0834b147dffd6b28aa9.1724157750.git.christophe.leroy@csgroup.eu Signed-off-by: Sasha Levin --- arch/powerpc/mm/nohash/tlb_64e.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c index 1dcda261554c..b6af3ec4d001 100644 --- a/arch/powerpc/mm/nohash/tlb_64e.c +++ b/arch/powerpc/mm/nohash/tlb_64e.c @@ -33,7 +33,7 @@ * though this will probably be made common with other nohash * implementations at some point */ -int mmu_pte_psize; /* Page size used for PTE pages */ +static int mmu_pte_psize; /* Page size used for PTE pages */ int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ unsigned long linear_map_top; /* Top of linear mapping */ From 55fe39810bead547dbbc24980e880fd314dcd3fb Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 20 Aug 2024 13:28:07 +0200 Subject: [PATCH 339/374] powerpc/vdso: Don't discard rela sections [ Upstream commit 6114139c3bdde992f4a19264e4f9bfc100d8d776 ] After building the VDSO, there is a verification that it contains no dynamic relocation, see commit aff69273af61 ("vdso: Improve cmd_vdso_check to check all dynamic relocations"). This verification uses readelf -r and doesn't work if rela sections are discarded. Fixes: 8ad57add77d3 ("powerpc/build: vdso linker warning for orphan sections") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://msgid.link/45c3e6fc76cad05ad2cac0f5b5dfb4fae86dc9d6.1724153239.git.christophe.leroy@csgroup.eu Signed-off-by: Sasha Levin --- arch/powerpc/kernel/vdso/vdso32.lds.S | 4 +++- arch/powerpc/kernel/vdso/vdso64.lds.S | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index 426e1ccc6971..8f57107000a2 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -74,6 +74,8 @@ SECTIONS .got : { *(.got) } :text .plt : { *(.plt) } + .rela.dyn : { *(.rela .rela*) } + _end = .; __end = .; PROVIDE(end = .); @@ -87,7 +89,7 @@ SECTIONS *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) - *(.got1 .glink .iplt .rela*) + *(.got1 .glink .iplt) } } diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index bda6c8cdd459..400819258c06 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -69,7 +69,7 @@ SECTIONS .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table) } - .rela.dyn ALIGN(8) : { *(.rela.dyn) } + .rela.dyn ALIGN(8) : { *(.rela .rela*) } .got ALIGN(8) : { *(.got .toc) } @@ -86,7 +86,7 @@ SECTIONS *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.opd) - *(.glink .iplt .plt .rela*) + *(.glink .iplt .plt) } } From 67002edd6c6c20b03d46c8951d8911c0ecfa01f5 Mon Sep 17 00:00:00 2001 From: Mohan Kumar Date: Fri, 23 Aug 2024 14:43:42 +0000 Subject: [PATCH 340/374] ASoC: tegra: Fix CBB error during probe() [ Upstream commit 6781b962d97bc52715a8db8cc17278cc3c23ebe8 ] When Tegra audio drivers are built as part 
of the kernel image, TIMEOUT_ERR is observed from cbb-fabric. Following is seen on Jetson AGX Orin during boot: [ 8.012482] ************************************** [ 8.017423] CPU:0, Error:cbb-fabric, Errmon:2 [ 8.021922] Error Code : TIMEOUT_ERR [ 8.025966] Overflow : Multiple TIMEOUT_ERR [ 8.030644] [ 8.032175] Error Code : TIMEOUT_ERR [ 8.036217] MASTER_ID : CCPLEX [ 8.039722] Address : 0x290a0a8 [ 8.043318] Cache : 0x1 -- Bufferable [ 8.047630] Protection : 0x2 -- Unprivileged, Non-Secure, Data Access [ 8.054628] Access_Type : Write [ 8.106130] WARNING: CPU: 0 PID: 124 at drivers/soc/tegra/cbb/tegra234-cbb.c:604 tegra234_cbb_isr+0x134/0x178 [ 8.240602] Call trace: [ 8.243126] tegra234_cbb_isr+0x134/0x178 [ 8.247261] __handle_irq_event_percpu+0x60/0x238 [ 8.252132] handle_irq_event+0x54/0xb8 These errors happen when MVC device, which is a child of AHUB device, tries to access its device registers. This happens as part of call tegra210_mvc_reset_vol_settings() in MVC device probe(). The root cause of this problem is, the child MVC device gets probed before the AHUB clock gets enabled. The AHUB clock is enabled in runtime PM resume of parent AHUB device and due to the wrong sequence of pm_runtime_enable() in AHUB driver, runtime PM resume doesn't happen for AHUB device when MVC makes register access. Fix this by calling pm_runtime_enable() for parent AHUB device before of_platform_populate() in AHUB driver. This ensures that clock becomes available when MVC makes register access. Fixes: 16e1bcc2caf4 ("ASoC: tegra: Add Tegra210 based AHUB driver") Signed-off-by: Mohan Kumar Signed-off-by: Ritu Chaudhary Signed-off-by: Sameer Pujar Link: https://patch.msgid.link/20240823144342.4123814-3-spujar@nvidia.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/tegra/tegra210_ahub.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c index 3f114a2adfce..ab3c6b2544d2 100644 --- a/sound/soc/tegra/tegra210_ahub.c +++ b/sound/soc/tegra/tegra210_ahub.c @@ -2,7 +2,7 @@ // // tegra210_ahub.c - Tegra210 AHUB driver // -// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. +// Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved. #include #include @@ -1391,11 +1391,13 @@ static int tegra_ahub_probe(struct platform_device *pdev) return err; } + pm_runtime_enable(&pdev->dev); + err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); - if (err) + if (err) { + pm_runtime_disable(&pdev->dev); return err; - - pm_runtime_enable(&pdev->dev); + } return 0; } From 6c04d1e3ab22cc5394ef656429638a5947f87244 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Wed, 21 Aug 2024 16:28:26 +0200 Subject: [PATCH 341/374] nvmet-tcp: fix kernel crash if commands allocation fails [ Upstream commit 5572a55a6f830ee3f3a994b6b962a5c327d28cb3 ] If the commands allocation fails in nvmet_tcp_alloc_cmds() the kernel crashes in nvmet_tcp_release_queue_work() because of a NULL pointer dereference. nvmet: failed to install queue 0 cntlid 1 ret 6 Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008 Fix the bug by setting queue->nr_cmds to zero in case nvmet_tcp_alloc_cmd() fails. 
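To illustrate why clearing the count is sufficient, here is a small stand-alone C sketch of the failure pattern; the type and function names below are invented for the example and are not the real nvmet-tcp structures:

  /* Toy model: the release path trusts nr_cmds even when the cmds
   * array was never allocated, mirroring the reported NULL dereference.
   */
  #include <stdio.h>

  struct cmd { int id; };

  struct queue {
          struct cmd *cmds;       /* stays NULL when allocation fails */
          int nr_cmds;
  };

  static void release_queue(struct queue *q)
  {
          for (int i = 0; i < q->nr_cmds; i++)            /* nr_cmds != 0 and      */
                  printf("freeing cmd %d\n", q->cmds[i].id); /* cmds == NULL: crash */
  }

  int main(void)
  {
          struct queue q = { .cmds = NULL, .nr_cmds = 128 };

          q.nr_cmds = 0;          /* the fix: reset the count on allocation failure */
          release_queue(&q);      /* loop body never runs, no dereference           */
          return 0;
  }

With the count reset, the teardown loop degenerates to a no-op instead of walking a NULL-based array.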
Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver") Signed-off-by: Maurizio Lombardi Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/target/tcp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 380f22ee3ebb..ebf25819a7da 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -2146,8 +2146,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) } queue->nr_cmds = sq->size * 2; - if (nvmet_tcp_alloc_cmds(queue)) + if (nvmet_tcp_alloc_cmds(queue)) { + queue->nr_cmds = 0; return NVME_SC_INTERNAL; + } return 0; } From cf7ce11385cc2189c16e79fe66fa996e3bfa9637 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 26 Aug 2024 11:20:57 -0700 Subject: [PATCH 342/374] nvme-pci: allocate tagset on reset if necessary [ Upstream commit 6f01bdbfef3b62955cf6503a8425d527b3a5cf94 ] If a drive is unable to create IO queues on the initial probe, a subsequent reset will need to allocate the tagset if IO queue creation is successful. Without this, blk_mq_update_nr_hw_queues will crash on a bad pointer due to the invalid tagset. Fixes: eac3ef262941f62 ("nvme-pci: split the initial probe from the rest path") Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/pci.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 146d33c4839f..18d85575cdb4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2473,6 +2473,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) static void nvme_pci_update_nr_queues(struct nvme_dev *dev) { + if (!dev->ctrl.tagset) { + nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, + nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); + return; + } + blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); /* free previously allocated queues that are no longer usable */ nvme_free_queues(dev, dev->online_queues); From 68e6917f3d4e977834df14cfef7d2637dbeb6b1b Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Fri, 23 Aug 2024 13:58:56 +0100 Subject: [PATCH 343/374] clk: qcom: gcc-x1e80100: Don't use parking clk_ops for QUPs [ Upstream commit ca082333b4356688be715ed9cc762fc5d3d5f4c5 ] Per Stephen Boyd's explanation in the link below, QUP RCG clocks do not need to be parked when switching frequency. A side-effect of parking to a lower frequency can be a momentary invalid clock driven on an in-use serial peripheral. This can cause "junk" to be spewed out of a UART as a low-impact example. On the x1e80100-crd this serial port junk can be observed on linux-next. Apply a similar fix to the x1e80100 Global Clock controller to remediate.
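As a rough illustration of the glitch mechanism, here is a toy C model (not the qcom clk_rcg2 implementation; all names and rates below are invented for the example): a rate switch that first parks the clock on a low "safe" rate briefly exposes the consumer to a wrong frequency, while a direct switch does not.

  /* Toy model of parking vs. direct rate switching; not the real driver. */
  #include <stdio.h>

  static unsigned long qup_rate;          /* rate seen by the serial engine */

  static void switch_rate_direct(unsigned long hz)
  {
          qup_rate = hz;                  /* consumer only ever sees old or new rate */
  }

  static void switch_rate_with_parking(unsigned long hz)
  {
          qup_rate = 19200000;            /* parked on an illustrative safe rate ... */
          printf("transient rate: %lu\n", qup_rate); /* ... UART baud is now wrong   */
          qup_rate = hz;
  }

  int main(void)
  {
          switch_rate_with_parking(7372800);  /* behaviour with the shared (parking) ops */
          switch_rate_direct(7372800);        /* behaviour with plain clk_rcg2_ops       */
          printf("final rate: %lu\n", qup_rate);
          return 0;
  }

In the toy model the transient rate is visible only on the parking path, which is why the diff below moves the QUP clocks from the shared ops back to the plain RCG ops.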
Link: https://lore.kernel.org/all/20240819233628.2074654-3-swboyd@chromium.org/ Fixes: 161b7c401f4b ("clk: qcom: Add Global Clock controller (GCC) driver for X1E80100") Fixes: 929c75d57566 ("clk: qcom: gcc-sm8550: Mark RCGs shared where applicable") Suggested-by: Neil Armstrong Signed-off-by: Bryan O'Donoghue Link: https://lore.kernel.org/r/20240823-x1e80100-clk-fix-v1-1-0b1b4f5a96e8@linaro.org Reviewed-by: Konrad Dybcio Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/qcom/gcc-x1e80100.c | 48 ++++++++++++++++----------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 24f84c6705e5..52ea2a0888f3 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -670,7 +670,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { @@ -687,7 +687,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { @@ -719,7 +719,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { @@ -736,7 +736,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { @@ -768,7 +768,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { @@ -785,7 +785,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { @@ -802,7 +802,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { @@ -819,7 +819,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { @@ -836,7 +836,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { 
@@ -853,7 +853,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { @@ -870,7 +870,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { @@ -887,7 +887,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { @@ -904,7 +904,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { @@ -921,7 +921,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { @@ -938,7 +938,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { @@ -955,7 +955,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { @@ -972,7 +972,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { @@ -989,7 +989,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { @@ -1006,7 +1006,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { @@ -1023,7 +1023,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { @@ -1040,7 +1040,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = 
CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { @@ -1057,7 +1057,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { @@ -1074,7 +1074,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = { .parent_data = gcc_parent_data_8, .num_parents = ARRAY_SIZE(gcc_parent_data_8), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { @@ -1091,7 +1091,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { From 9795a6bd717275752c570b092cfb81440711fdd5 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 21 Aug 2024 12:10:04 +0800 Subject: [PATCH 344/374] ASoc: SOF: topology: Clear SOF link platform name upon unload [ Upstream commit e0be875c5bf03a9676a6bfed9e0f1766922a7dbd ] The SOF topology loading function sets the device name for the platform component link. This should be unset when unloading the topology, otherwise a machine driver unbind/bind or reprobe would complain about an invalid component as having both its component name and of_node set: mt8186_mt6366 sound: ASoC: Both Component name/of_node are set for AFE_SOF_DL1 mt8186_mt6366 sound: error -EINVAL: Cannot register card mt8186_mt6366 sound: probe with driver mt8186_mt6366 failed with error -22 This happens with machine drivers that set the of_node separately. Clear the SOF link platform name in the topology unload callback. Fixes: 311ce4fe7637 ("ASoC: SOF: Add support for loading topologies") Signed-off-by: Chen-Yu Tsai Link: https://patch.msgid.link/20240821041006.2618855-1-wenst@chromium.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/sof/topology.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index da182314aa87..ebbd99e34143 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -2050,6 +2050,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj if (!slink) return 0; + slink->link->platforms->name = NULL; + kfree(slink->tuples); list_del(&slink->list); kfree(slink->hw_configs); From 62f6ca61bb88eea3e4263ca7eef2f7fcdc872b2d Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Mon, 26 Aug 2024 09:36:46 -0700 Subject: [PATCH 345/374] riscv: selftests: Remove mmap hint address checks [ Upstream commit 83dae72ac0382693540a055ec6210dd3691a8df6 ] The mmap behavior that restricts the addresses returned by mmap caused unexpected behavior, so get rid of the test cases that check that behavior. 
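As background for this and the following patch: the address argument to mmap() is only a hint, so callers must not assume the returned mapping is bounded by it. A minimal userspace illustration of that contract follows; it is not part of the selftests being modified, and the hint value is arbitrary.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)0x40000000UL;   /* arbitrary example hint */
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* The kernel may legitimately place the mapping anywhere; failing
	 * with ENOMEM just because the hinted range is unavailable is the
	 * behaviour these patches remove. */
	printf("hint %p -> mapped at %p\n", hint, p);
	munmap(p, 4096);
	return 0;
}
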
Signed-off-by: Charlie Jenkins Fixes: 73d05262a2ca ("selftests: riscv: Generalize mm selftests") Link: https://lore.kernel.org/r/20240826-riscv_mmap-v1-2-cd8962afe47f@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- .../selftests/riscv/mm/mmap_bottomup.c | 2 - .../testing/selftests/riscv/mm/mmap_default.c | 2 - tools/testing/selftests/riscv/mm/mmap_test.h | 67 ------------------- 3 files changed, 71 deletions(-) diff --git a/tools/testing/selftests/riscv/mm/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c index 7f7d3eb8b9c9..f9ccae50349b 100644 --- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c +++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c @@ -7,8 +7,6 @@ TEST(infinite_rlimit) { EXPECT_EQ(BOTTOM_UP, memory_layout()); - - TEST_MMAPS; } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/riscv/mm/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c index 2ba3ec990006..3f53b6ecc326 100644 --- a/tools/testing/selftests/riscv/mm/mmap_default.c +++ b/tools/testing/selftests/riscv/mm/mmap_default.c @@ -7,8 +7,6 @@ TEST(default_rlimit) { EXPECT_EQ(TOP_DOWN, memory_layout()); - - TEST_MMAPS; } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h index 3b29ca3bb3d4..75918d15919f 100644 --- a/tools/testing/selftests/riscv/mm/mmap_test.h +++ b/tools/testing/selftests/riscv/mm/mmap_test.h @@ -10,76 +10,9 @@ #define TOP_DOWN 0 #define BOTTOM_UP 1 -#if __riscv_xlen == 64 -uint64_t random_addresses[] = { - 0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd, - 0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7, - 0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02, - 0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f, - 0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1, - 0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827, - 0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9, - 0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753, - 0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a, - 0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32, - 0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3, - 0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046, - 0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31, - 0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012, - 0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379, - 0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30, - 0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6, - 0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f, - 0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82, - 0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4, -}; -#else -uint32_t random_addresses[] = { - 0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213, - 0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1, - 0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca, - 0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646, - 0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b, - 0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b, - 0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31, - 0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e, - 0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4, - 0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 
0xd96da2c2, 0xa7ae041d, -}; -#endif - -// Only works on 64 bit -#if __riscv_xlen == 64 #define PROT (PROT_READ | PROT_WRITE) #define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) -/* mmap must return a value that doesn't use more bits than the hint address. */ -static inline unsigned long get_max_value(unsigned long input) -{ - unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 - - __builtin_clzl(input)))); - - return max_bit + (max_bit - 1); -} - -#define TEST_MMAPS \ - ({ \ - void *mmap_addr; \ - for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) { \ - mmap_addr = mmap((void *)random_addresses[i], \ - 5 * sizeof(int), PROT, FLAGS, 0, 0); \ - EXPECT_NE(MAP_FAILED, mmap_addr); \ - EXPECT_GE((void *)get_max_value(random_addresses[i]), \ - mmap_addr); \ - mmap_addr = mmap((void *)random_addresses[i], \ - 5 * sizeof(int), PROT, FLAGS, 0, 0); \ - EXPECT_NE(MAP_FAILED, mmap_addr); \ - EXPECT_GE((void *)get_max_value(random_addresses[i]), \ - mmap_addr); \ - } \ - }) -#endif /* __riscv_xlen == 64 */ - static inline int memory_layout(void) { void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0); From 9bf1c67635b53a1f4511e0daa631d94268e83a18 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Mon, 26 Aug 2024 09:36:47 -0700 Subject: [PATCH 346/374] riscv: mm: Do not restrict mmap address based on hint [ Upstream commit 2116988d5372aec51f8c4fb85bf8e305ecda47a0 ] The hint address should not forcefully restrict the addresses returned by mmap as this causes mmap to report ENOMEM when there is memory still available. Signed-off-by: Charlie Jenkins Fixes: b5b4287accd7 ("riscv: mm: Use hint address in mmap if available") Fixes: add2cc6b6515 ("RISC-V: mm: Restrict address space for sv39,sv48,sv57") Closes: https://lore.kernel.org/linux-kernel/ZbxTNjQPFKBatMq+@ghost/T/#mccb1890466bf5a488c9ce7441e57e42271895765 Link: https://lore.kernel.org/r/20240826-riscv_mmap-v1-3-cd8962afe47f@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/include/asm/processor.h | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 68c3432dc6ea..6c129144ef19 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -14,36 +14,14 @@ #include -/* - * addr is a hint to the maximum userspace address that mmap should provide, so - * this macro needs to return the largest address space available so that - * mmap_end < addr, being mmap_end the top of that address space. - * See Documentation/arch/riscv/vm-layout.rst for more details. 
- */ #define arch_get_mmap_end(addr, len, flags) \ ({ \ - unsigned long mmap_end; \ - typeof(addr) _addr = (addr); \ - if ((_addr) == 0 || is_compat_task() || \ - ((_addr + len) > BIT(VA_BITS - 1))) \ - mmap_end = STACK_TOP_MAX; \ - else \ - mmap_end = (_addr + len); \ - mmap_end; \ + STACK_TOP_MAX; \ }) #define arch_get_mmap_base(addr, base) \ ({ \ - unsigned long mmap_base; \ - typeof(addr) _addr = (addr); \ - typeof(base) _base = (base); \ - unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \ - if ((_addr) == 0 || is_compat_task() || \ - ((_addr + len) > BIT(VA_BITS - 1))) \ - mmap_base = (_base); \ - else \ - mmap_base = (_addr + len) - rnd_gap; \ - mmap_base; \ + base; \ }) #ifdef CONFIG_64BIT From 527663015f23f83e4efb04fdd43724da905fe531 Mon Sep 17 00:00:00 2001 From: Matteo Martelli Date: Thu, 1 Aug 2024 14:07:19 +0200 Subject: [PATCH 347/374] ASoC: sunxi: sun4i-i2s: fix LRCLK polarity in i2s mode [ Upstream commit 3e83957e8dd7433a69116780d9bad217b00913ea ] This fixes the LRCLK polarity for sun8i-h3 and sun50i-h6 in i2s mode which was wrongly inverted. The LRCLK was being set in reversed logic compared to the DAI format: inverted LRCLK for SND_SOC_DAIFMT_IB_NF and SND_SOC_DAIFMT_NB_NF; normal LRCLK for SND_SOC_DAIFMT_IB_IF and SND_SOC_DAIFMT_NB_IF. Such reversed logic applies properly for DSP_A, DSP_B, LEFT_J and RIGHT_J modes but not for I2S mode, for which the LRCLK signal results reversed to what expected on the bus. The issue is due to a misinterpretation of the LRCLK polarity bit of the H3 and H6 i2s controllers. Such bit in this case does not mean "0 => normal" or "1 => inverted" according to the expected bus operation, but it means "0 => frame starts on low edge" and "1 => frame starts on high edge" (from the User Manuals). This commit fixes the LRCLK polarity by setting the LRCLK polarity bit according to the selected bus mode and renames the LRCLK polarity bit definition to avoid further confusion. Fixes: dd657eae8164 ("ASoC: sun4i-i2s: Fix the LRCK polarity") Fixes: 73adf87b7a58 ("ASoC: sun4i-i2s: Add support for H6 I2S") Signed-off-by: Matteo Martelli Link: https://patch.msgid.link/20240801-asoc-fix-sun4i-i2s-v2-1-a8e4e9daa363@gmail.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/sunxi/sun4i-i2s.c | 143 ++++++++++++++++++------------------ 1 file changed, 73 insertions(+), 70 deletions(-) diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c index 5f8d979585b6..3af0b2aab291 100644 --- a/sound/soc/sunxi/sun4i-i2s.c +++ b/sound/soc/sunxi/sun4i-i2s.c @@ -100,8 +100,8 @@ #define SUN8I_I2S_CTRL_MODE_PCM (0 << 4) #define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(19) -#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 19) -#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 19) +#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH (1 << 19) +#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW (0 << 19) #define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK GENMASK(17, 8) #define SUN8I_I2S_FMT0_LRCK_PERIOD(period) ((period - 1) << 8) #define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK BIT(7) @@ -729,65 +729,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, unsigned int fmt) { - u32 mode, val; + u32 mode, lrclk_pol, bclk_pol, val; u8 offset; - /* - * DAI clock polarity - * - * The setup for LRCK contradicts the datasheet, but under a - * scope it's clear that the LRCK polarity is reversed - * compared to the expected polarity on the bus. 
- */ - switch (fmt & SND_SOC_DAIFMT_INV_MASK) { - case SND_SOC_DAIFMT_IB_IF: - /* Invert both clocks */ - val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; - break; - case SND_SOC_DAIFMT_IB_NF: - /* Invert bit clock */ - val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED | - SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; - break; - case SND_SOC_DAIFMT_NB_IF: - /* Invert frame clock */ - val = 0; - break; - case SND_SOC_DAIFMT_NB_NF: - val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; - break; - default: - return -EINVAL; - } - - regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, - SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | - SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, - val); - /* DAI Mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_PCM; offset = 1; break; case SND_SOC_DAIFMT_DSP_B: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_PCM; offset = 0; break; case SND_SOC_DAIFMT_I2S: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW; mode = SUN8I_I2S_CTRL_MODE_LEFT; offset = 1; break; case SND_SOC_DAIFMT_LEFT_J: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_LEFT; offset = 0; break; case SND_SOC_DAIFMT_RIGHT_J: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_RIGHT; offset = 0; break; @@ -805,6 +777,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, SUN8I_I2S_TX_CHAN_OFFSET_MASK, SUN8I_I2S_TX_CHAN_OFFSET(offset)); + /* DAI clock polarity */ + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL; + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_IB_IF: + /* Invert both clocks */ + lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; + break; + case SND_SOC_DAIFMT_IB_NF: + /* Invert bit clock */ + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; + break; + case SND_SOC_DAIFMT_NB_IF: + /* Invert frame clock */ + lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; + break; + case SND_SOC_DAIFMT_NB_NF: + /* No inversion */ + break; + default: + return -EINVAL; + } + + regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, + SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | + SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, + lrclk_pol | bclk_pol); + /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_BP_FP: @@ -836,65 +837,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, unsigned int fmt) { - u32 mode, val; + u32 mode, lrclk_pol, bclk_pol, val; u8 offset; - /* - * DAI clock polarity - * - * The setup for LRCK contradicts the datasheet, but under a - * scope it's clear that the LRCK polarity is reversed - * compared to the expected polarity on the bus. 
- */ - switch (fmt & SND_SOC_DAIFMT_INV_MASK) { - case SND_SOC_DAIFMT_IB_IF: - /* Invert both clocks */ - val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; - break; - case SND_SOC_DAIFMT_IB_NF: - /* Invert bit clock */ - val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED | - SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; - break; - case SND_SOC_DAIFMT_NB_IF: - /* Invert frame clock */ - val = 0; - break; - case SND_SOC_DAIFMT_NB_NF: - val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED; - break; - default: - return -EINVAL; - } - - regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, - SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | - SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, - val); - /* DAI Mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_PCM; offset = 1; break; case SND_SOC_DAIFMT_DSP_B: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_PCM; offset = 0; break; case SND_SOC_DAIFMT_I2S: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW; mode = SUN8I_I2S_CTRL_MODE_LEFT; offset = 1; break; case SND_SOC_DAIFMT_LEFT_J: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_LEFT; offset = 0; break; case SND_SOC_DAIFMT_RIGHT_J: + lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH; mode = SUN8I_I2S_CTRL_MODE_RIGHT; offset = 0; break; @@ -912,6 +885,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s, SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK, SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset)); + /* DAI clock polarity */ + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL; + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_IB_IF: + /* Invert both clocks */ + lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; + break; + case SND_SOC_DAIFMT_IB_NF: + /* Invert bit clock */ + bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED; + break; + case SND_SOC_DAIFMT_NB_IF: + /* Invert frame clock */ + lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK; + break; + case SND_SOC_DAIFMT_NB_NF: + /* No inversion */ + break; + default: + return -EINVAL; + } + + regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG, + SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK | + SUN8I_I2S_FMT0_BCLK_POLARITY_MASK, + lrclk_pol | bclk_pol); + + /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_BP_FP: From 0d1e7301225c40ef9b5b43d71c9945b233ee137f Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Aug 2024 16:36:26 -0700 Subject: [PATCH 348/374] clk: qcom: gcc-sm8550: Don't use parking clk_ops for QUPs [ Upstream commit d10eeb75168b84ed9559c58efe2756c2e0bc052a ] The QUPs aren't shared in a way that requires parking the RCG at an always on parent in case some other entity turns on the clk. The hardware is capable of setting a new frequency itself with the DFS mode, so parking is unnecessary. Furthermore, there aren't any GDSCs for these devices, so there isn't a possibility of the GDSC turning on the clks for housekeeping purposes. This wasn't a problem to mark these clks shared until we started parking shared RCGs at clk registration time in commit 01a0a6cc8cfd ("clk: qcom: Park shared RCGs upon registration"). Parking at init is actually harmful to the UART when earlycon is used. If the device is pumping out data while the frequency changes you'll see garbage on the serial console until the driver can probe and actually set a proper frequency. 
Revert the QUP part of commit 929c75d57566 ("clk: qcom: gcc-sm8550: Mark RCGs shared where applicable") so that the QUPs don't get parked during clk registration and break UART operations. Fixes: 01a0a6cc8cfd ("clk: qcom: Park shared RCGs upon registration") Fixes: 929c75d57566 ("clk: qcom: gcc-sm8550: Mark RCGs shared where applicable") Cc: Konrad Dybcio Cc: Bjorn Andersson Cc: Taniya Das Reported-by: Amit Pundir Closes: https://lore.kernel.org/CAMi1Hd1KQBE4kKUdAn8E5FV+BiKzuv+8FoyWQrrTHPDoYTuhgA@mail.gmail.com Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20240819233628.2074654-2-swboyd@chromium.org Tested-by: Amit Pundir Tested-by: Neil Armstrong # on SM8550-QRD Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/qcom/gcc-sm8550.c | 52 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c index 26d7349e7642..482ed17733ea 100644 --- a/drivers/clk/qcom/gcc-sm8550.c +++ b/drivers/clk/qcom/gcc-sm8550.c @@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; @@ -700,7 +700,7 @@ static struct clk_init_data 
gcc_qupv3_wrap1_s0_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { @@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { @@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { @@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { @@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { @@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { @@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { @@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { @@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { @@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { @@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { @@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = 
&clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { @@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { @@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { @@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = { .parent_data = gcc_parent_data_8, .num_parents = ARRAY_SIZE(gcc_parent_data_8), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { @@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { From 709aba10e2fa7b97ff2948898f21c531b56dcdcf Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Aug 2024 16:36:27 -0700 Subject: [PATCH 349/374] clk: qcom: gcc-sm8550: Don't park the USB RCG at registration time [ Upstream commit 7b6dfa1bbe7f727315d2e05a2fc8e4cfeb779156 ] Amit Pundir reports that audio and USB-C host mode stops working if the gcc_usb30_prim_master_clk_src clk is registered and clk_rcg2_shared_init() parks it on XO. Skip parking this clk at registration time to fix those issues. Partially revert commit 01a0a6cc8cfd ("clk: qcom: Park shared RCGs upon registration") by skipping the parking bit for this clk, but keep the part where we cache the config register. That's still necessary to figure out the true parent of the clk at registration time. 
Fixes: 01a0a6cc8cfd ("clk: qcom: Park shared RCGs upon registration") Fixes: 929c75d57566 ("clk: qcom: gcc-sm8550: Mark RCGs shared where applicable") Cc: Konrad Dybcio Cc: Bjorn Andersson Cc: Taniya Das Reported-by: Amit Pundir Closes: https://lore.kernel.org/CAMi1Hd1KQBE4kKUdAn8E5FV+BiKzuv+8FoyWQrrTHPDoYTuhgA@mail.gmail.com Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20240819233628.2074654-3-swboyd@chromium.org Tested-by: Amit Pundir Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/qcom/clk-rcg.h | 1 + drivers/clk/qcom/clk-rcg2.c | 30 ++++++++++++++++++++++++++++++ drivers/clk/qcom/gcc-sm8550.c | 2 +- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index d7414361e432..8e0f3372dc7a 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops; extern const struct clk_ops clk_pixel_ops; extern const struct clk_ops clk_gfx3d_ops; extern const struct clk_ops clk_rcg2_shared_ops; +extern const struct clk_ops clk_rcg2_shared_no_init_park_ops; extern const struct clk_ops clk_dp_ops; struct clk_rcg_dfs_data { diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 30b19bd39d08..bf26c5448f00 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1348,6 +1348,36 @@ const struct clk_ops clk_rcg2_shared_ops = { }; EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); +static int clk_rcg2_shared_no_init_park(struct clk_hw *hw) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + /* + * Read the config register so that the parent is properly mapped at + * registration time. + */ + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg); + + return 0; +} + +/* + * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left + * unchanged at registration time. + */ +const struct clk_ops clk_rcg2_shared_no_init_park_ops = { + .init = clk_rcg2_shared_no_init_park, + .enable = clk_rcg2_shared_enable, + .disable = clk_rcg2_shared_disable, + .get_parent = clk_rcg2_shared_get_parent, + .set_parent = clk_rcg2_shared_set_parent, + .recalc_rate = clk_rcg2_shared_recalc_rate, + .determine_rate = clk_rcg2_determine_rate, + .set_rate = clk_rcg2_shared_set_rate, + .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, +}; +EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops); + /* Common APIs to be used for DFS based RCGR */ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, struct freq_tbl *f) diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c index 482ed17733ea..eae42f756c13 100644 --- a/drivers/clk/qcom/gcc-sm8550.c +++ b/drivers/clk/qcom/gcc-sm8550.c @@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_shared_no_init_park_ops, }, }; From 74182cc77497e2deca0541c6331f1b536afa4fe1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 28 Aug 2024 12:37:19 +1000 Subject: [PATCH 350/374] nouveau: fix the fwsec sb verification register. [ Upstream commit f33b9ab0495b7e3bb01bf6d76045f078e20ada65 ] This aligns with what open gpu does, the 0x15 hex is just to trick you. 
Fixes: 176fdcbddfd2 ("drm/nouveau/gsp/r535: add support for booting GSP-RM") Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20240828023720.1596602-1-airlied@gmail.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c index 330d72b1a4af..52412965fac1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c @@ -324,7 +324,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) return ret; /* Verify. */ - err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff; + err = nvkm_rd32(device, 0x001400 + (0x15 * 4)) & 0x0000ffff; if (err) { nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err); return -EIO; From e74af8de40b575d45ef16e4b29e8408c2e39b7be Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 29 Aug 2024 18:58:37 +0300 Subject: [PATCH 351/374] drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused [ Upstream commit fcd9e8afd546f6ced378d078345a89bf346d065e ] When debug_fence_init_onstack() is unused (CONFIG_DRM_I915_SELFTEST=n), it prevents kernel builds with clang, `make W=1` and CONFIG_WERROR=y: .../i915_sw_fence.c:97:20: error: unused function 'debug_fence_init_onstack' [-Werror,-Wunused-function] 97 | static inline void debug_fence_init_onstack(struct i915_sw_fence *fence) | ^~~~~~~~~~~~~~~~~~~~~~~~ Fix this by marking debug_fence_init_onstack() with __maybe_unused. See also commit 6863f5643dd7 ("kbuild: allow Clang to find unused static inline functions for W=1 build"). Fixes: 214707fc2ce0 ("drm/i915/selftests: Wrap a timer into a i915_sw_fence") Signed-off-by: Andy Shevchenko Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20240829155950.1141978-2-andriy.shevchenko@linux.intel.com Signed-off-by: Jani Nikula (cherry picked from commit 5bf472058ffb43baf6a4cdfe1d7f58c4c194c688) Signed-off-by: Joonas Lahtinen Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/i915_sw_fence.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 8a9aad523eec..d4020ff3549a 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) debug_object_init(fence, &i915_sw_fence_debug_descr); } -static inline void debug_fence_init_onstack(struct i915_sw_fence *fence) +static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence) { debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr); } @@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) { } -static inline void debug_fence_init_onstack(struct i915_sw_fence *fence) +static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence) { } From c2017deb0330e8a48b66efd856a6dac056005215 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 29 Aug 2024 18:58:38 +0300 Subject: [PATCH 352/374] drm/i915/fence: Mark debug_fence_free() with __maybe_unused [ Upstream commit f99999536128b14b5d765a9982763b5134efdd79 ] When debug_fence_free() is unused (CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS=n), it prevents kernel builds with clang, `make W=1` and CONFIG_WERROR=y: .../i915_sw_fence.c:118:20: error: unused function 
'debug_fence_free' [-Werror,-Wunused-function] 118 | static inline void debug_fence_free(struct i915_sw_fence *fence) | ^~~~~~~~~~~~~~~~ Fix this by marking debug_fence_free() with __maybe_unused. See also commit 6863f5643dd7 ("kbuild: allow Clang to find unused static inline functions for W=1 build"). Fixes: fc1584059d6c ("drm/i915: Integrate i915_sw_fence with debugobjects") Signed-off-by: Andy Shevchenko Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20240829155950.1141978-3-andriy.shevchenko@linux.intel.com Signed-off-by: Jani Nikula (cherry picked from commit 8be4dce5ea6f2368cc25edc71989c4690fa66964) Signed-off-by: Joonas Lahtinen Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/i915_sw_fence.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index d4020ff3549a..1d4cc91c0e40 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence) debug_object_destroy(fence, &i915_sw_fence_debug_descr); } -static inline void debug_fence_free(struct i915_sw_fence *fence) +static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence) { debug_object_free(fence, &i915_sw_fence_debug_descr); smp_wmb(); /* flush the change in state before reallocation */ @@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence) { } -static inline void debug_fence_free(struct i915_sw_fence *fence) +static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence) { } From fcb0ebed9345e3037484c53f4cd7edc867f27f84 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 26 Aug 2024 17:08:32 +0200 Subject: [PATCH 353/374] gpio: rockchip: fix OF node leak in probe() [ Upstream commit adad2e460e505a556f5ea6f0dc16fe95e62d5d76 ] Driver code is leaking OF node reference from of_get_parent() in probe(). Fixes: 936ee2675eee ("gpio/rockchip: add driver for rockchip gpio") Signed-off-by: Krzysztof Kozlowski Reviewed-by: Heiko Stuebner Reviewed-by: Shawn Lin Link: https://lore.kernel.org/r/20240826150832.65657-1-krzysztof.kozlowski@linaro.org Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- drivers/gpio/gpio-rockchip.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index 0bd339813110..365ab947983c 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev) return -ENODEV; pctldev = of_pinctrl_get(pctlnp); + of_node_put(pctlnp); if (!pctldev) return -EPROBE_DEFER; From d195d5fae9b82465b14b7bcf0665cee9c482b793 Mon Sep 17 00:00:00 2001 From: Liao Chen Date: Mon, 2 Sep 2024 11:58:48 +0000 Subject: [PATCH 354/374] gpio: modepin: Enable module autoloading [ Upstream commit a5135526426df5319d5f4bcd15ae57c45a97714b ] Add MODULE_DEVICE_TABLE(), so modules could be properly autoloaded based on the alias from of_device_id table. 
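For context, the one-line fix below relies on the standard autoloading pattern: MODULE_DEVICE_TABLE(of, ...) exports the compatible strings as module aliases so udev/kmod can load the module when a matching device-tree node appears. A generic sketch of that pattern follows; it is an illustrative driver skeleton, not the modepin driver, and every name in it is made up.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-dev" },
	{ /* sentinel */ }
};
/* Emits "of:N*T*Cvendor,example-dev" style aliases into modinfo, which is
 * what allows automatic module loading based on the device tree. */
MODULE_DEVICE_TABLE(of, example_of_match);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-dev",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("Example of of_device_id based module autoloading");
MODULE_LICENSE("GPL");
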
Fixes: 7687a5b0ee93 ("gpio: modepin: Add driver support for modepin GPIO controller") Signed-off-by: Liao Chen Reviewed-by: Michal Simek Link: https://lore.kernel.org/r/20240902115848.904227-1-liaochen4@huawei.com Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- drivers/gpio/gpio-zynqmp-modepin.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c index a0d69387c153..2f3c9ebfa78d 100644 --- a/drivers/gpio/gpio-zynqmp-modepin.c +++ b/drivers/gpio/gpio-zynqmp-modepin.c @@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = { { .compatible = "xlnx,zynqmp-gpio-modepin", }, { } }; +MODULE_DEVICE_TABLE(of, modepin_platform_id); static struct platform_driver modepin_platform_driver = { .driver = { From 1a46c7f6546b73cbf36f5a618a1a6bbb45391eb3 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Tue, 3 Sep 2024 10:53:23 -0300 Subject: [PATCH 355/374] smb: client: fix double put of @cfile in smb2_rename_path() [ Upstream commit 3523a3df03c6f04f7ea9c2e7050102657e331a4f ] If smb2_set_path_attr() is called with a valid @cfile and returned -EINVAL, we need to call cifs_get_writable_path() again as the reference of @cfile was already dropped by previous smb2_compound_op() call. Fixes: 71f15c90e785 ("smb: client: retry compound request without reusing lease") Signed-off-by: Paulo Alcantara (Red Hat) Cc: David Howells Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/smb2inode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 2a2847601f26..11a1c53c64e0 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -1106,6 +1106,8 @@ int smb2_rename_path(const unsigned int xid, co, DELETE, SMB2_OP_RENAME, cfile, source_dentry); if (rc == -EINVAL) { cifs_dbg(FYI, "invalid lease key, resending request without lease"); + cifs_get_writable_path(tcon, from_name, + FIND_WR_WITH_DELETE, &cfile); rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, co, DELETE, SMB2_OP_RENAME, cfile, NULL); } From 775a2238aa989a9345c11542fbac19b87de1a397 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 19 Aug 2024 00:11:31 +0000 Subject: [PATCH 356/374] riscv: Fix toolchain vector detection [ Upstream commit 5ba7a75a53dffbf727e842b5847859bb482ac4aa ] A recent change to gcc flags rv64iv as no longer valid: cc1: sorry, unimplemented: Currently the 'V' implementation requires the 'M' extension and as a result vector support is disabled. Fix this by adding m to our toolchain vector detection code. 
Signed-off-by: Anton Blanchard Fixes: fa8e7cce55da ("riscv: Enable Vector code to be built") Link: https://lore.kernel.org/r/20240819001131.1738806-1-antonb@tenstorrent.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 0525ee2d63c7..006232b67b46 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -545,8 +545,8 @@ config RISCV_ISA_SVPBMT config TOOLCHAIN_HAS_V bool default y - depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv) - depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv) + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv) depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800 depends on AS_HAS_OPTION_ARCH From 339b880e7ced0cf865e02a1dc3f32c82d1de1c8f Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Tue, 27 Aug 2024 08:52:30 +0200 Subject: [PATCH 357/374] riscv: Do not restrict memory size because of linear mapping on nommu [ Upstream commit 5f771088a2b5edd6f2c5c9f34484ca18dc389f3e ] It makes no sense to restrict physical memory size because of linear mapping size constraints when there is no linear mapping, so only do that when mmu is enabled. Reported-by: Geert Uytterhoeven Closes: https://lore.kernel.org/linux-riscv/CAMuHMdW0bnJt5GMRtOZGkTiM7GK4UaLJCDMF_Ouq++fnDKi3_A@mail.gmail.com/ Fixes: 3b6564427aea ("riscv: Fix linear mapping checks for non-contiguous memory regions") Signed-off-by: Alexandre Ghiti Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20240827065230.145021-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c5c66f53971a..91346c9da8ef 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -251,7 +251,7 @@ static void __init setup_bootmem(void) * The size of the linear page mapping may restrict the amount of * usable RAM. */ - if (IS_ENABLED(CONFIG_64BIT)) { + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) { max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE; memblock_cap_memory_range(phys_ram_base, max_mapped_addr - phys_ram_base); From 3445b6ccc792268119a9f415a51c9a21129205bf Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Thu, 21 Mar 2024 16:01:25 -0700 Subject: [PATCH 358/374] riscv: Add tracepoints for SBI calls and returns [ Upstream commit 56c1c1a09ab93c7b7c957860f01f8600d6c03143 ] These are useful for measuring the latency of SBI calls. The SBI HSM extension is excluded because those functions are called from contexts such as cpuidle where instrumentation is not allowed. 
Reviewed-by: Andrew Jones Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20240321230131.1838105-1-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt Stable-dep-of: 1ff95eb2bebd ("riscv: Fix RISCV_ALTERNATIVE_EARLY") Signed-off-by: Sasha Levin --- arch/riscv/include/asm/trace.h | 54 ++++++++++++++++++++++++++++++++++ arch/riscv/kernel/sbi.c | 7 +++++ 2 files changed, 61 insertions(+) create mode 100644 arch/riscv/include/asm/trace.h diff --git a/arch/riscv/include/asm/trace.h b/arch/riscv/include/asm/trace.h new file mode 100644 index 000000000000..6151cee5450c --- /dev/null +++ b/arch/riscv/include/asm/trace.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM riscv + +#if !defined(_TRACE_RISCV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RISCV_H + +#include + +TRACE_EVENT_CONDITION(sbi_call, + TP_PROTO(int ext, int fid), + TP_ARGS(ext, fid), + TP_CONDITION(ext != SBI_EXT_HSM), + + TP_STRUCT__entry( + __field(int, ext) + __field(int, fid) + ), + + TP_fast_assign( + __entry->ext = ext; + __entry->fid = fid; + ), + + TP_printk("ext=0x%x fid=%d", __entry->ext, __entry->fid) +); + +TRACE_EVENT_CONDITION(sbi_return, + TP_PROTO(int ext, long error, long value), + TP_ARGS(ext, error, value), + TP_CONDITION(ext != SBI_EXT_HSM), + + TP_STRUCT__entry( + __field(long, error) + __field(long, value) + ), + + TP_fast_assign( + __entry->error = error; + __entry->value = value; + ), + + TP_printk("error=%ld value=0x%lx", __entry->error, __entry->value) +); + +#endif /* _TRACE_RISCV_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm +#define TRACE_INCLUDE_FILE trace + +#include diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index e66e0999a800..a1d21d8f5293 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -14,6 +14,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + /* default SBI version is 0.1 */ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); @@ -31,6 +34,8 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, { struct sbiret ret; + trace_sbi_call(ext, fid); + register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); @@ -46,6 +51,8 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, ret.error = a0; ret.value = a1; + trace_sbi_return(ext, ret.error, ret.value); + return ret; } EXPORT_SYMBOL(sbi_ecall); From bb63fe4f5f31006ba544fd588234ce407ead93be Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 22 Mar 2024 12:26:29 +0100 Subject: [PATCH 359/374] riscv: Improve sbi_ecall() code generation by reordering arguments [ Upstream commit 16badacd8af48980c546839626d0329bab32b4c3 ] The sbi_ecall() function arguments are not in the same order as the ecall arguments, so we end up re-ordering the registers before the ecall which is useless and costly. So simply reorder the arguments in the same way as expected by ecall. Instead of reordering directly the arguments of sbi_ecall(), use a proxy macro since the current ordering is more natural. 
Before: Dump of assembler code for function sbi_ecall: 0xffffffff800085e0 <+0>: add sp,sp,-32 0xffffffff800085e2 <+2>: sd s0,24(sp) 0xffffffff800085e4 <+4>: mv t1,a0 0xffffffff800085e6 <+6>: add s0,sp,32 0xffffffff800085e8 <+8>: mv t3,a1 0xffffffff800085ea <+10>: mv a0,a2 0xffffffff800085ec <+12>: mv a1,a3 0xffffffff800085ee <+14>: mv a2,a4 0xffffffff800085f0 <+16>: mv a3,a5 0xffffffff800085f2 <+18>: mv a4,a6 0xffffffff800085f4 <+20>: mv a5,a7 0xffffffff800085f6 <+22>: mv a6,t3 0xffffffff800085f8 <+24>: mv a7,t1 0xffffffff800085fa <+26>: ecall 0xffffffff800085fe <+30>: ld s0,24(sp) 0xffffffff80008600 <+32>: add sp,sp,32 0xffffffff80008602 <+34>: ret After: Dump of assembler code for function __sbi_ecall: 0xffffffff8000b6b2 <+0>: add sp,sp,-32 0xffffffff8000b6b4 <+2>: sd s0,24(sp) 0xffffffff8000b6b6 <+4>: add s0,sp,32 0xffffffff8000b6b8 <+6>: ecall 0xffffffff8000b6bc <+10>: ld s0,24(sp) 0xffffffff8000b6be <+12>: add sp,sp,32 0xffffffff8000b6c0 <+14>: ret Signed-off-by: Alexandre Ghiti Reviewed-by: Atish Patra Reviewed-by: Yunhui Cui Link: https://lore.kernel.org/r/20240322112629.68170-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Stable-dep-of: 1ff95eb2bebd ("riscv: Fix RISCV_ALTERNATIVE_EARLY") Signed-off-by: Sasha Levin --- arch/riscv/include/asm/sbi.h | 10 ++++++---- arch/riscv/kernel/sbi.c | 10 +++++----- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 1079e214fe85..7cffd4ffecd0 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -304,10 +304,12 @@ struct sbiret { }; void sbi_init(void); -struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, - unsigned long arg1, unsigned long arg2, - unsigned long arg3, unsigned long arg4, - unsigned long arg5); +struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + int fid, int ext); +#define sbi_ecall(e, f, a0, a1, a2, a3, a4, a5) \ + __sbi_ecall(a0, a1, a2, a3, a4, a5, f, e) #ifdef CONFIG_RISCV_SBI_V01 void sbi_console_putchar(int ch); diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index a1d21d8f5293..837bdab2601b 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -27,10 +27,10 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) __ro_after_init; -struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, - unsigned long arg1, unsigned long arg2, - unsigned long arg3, unsigned long arg4, - unsigned long arg5) +struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + int fid, int ext) { struct sbiret ret; @@ -55,7 +55,7 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, return ret; } -EXPORT_SYMBOL(sbi_ecall); +EXPORT_SYMBOL(__sbi_ecall); int sbi_err_map_linux_errno(int err) { From 82f134ddd4f5da11c277bd1aa02e2a733179725a Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Thu, 29 Aug 2024 18:50:48 +0200 Subject: [PATCH 360/374] riscv: Fix RISCV_ALTERNATIVE_EARLY [ Upstream commit 1ff95eb2bebda50c4c5406caaf201e0fcb24cc8f ] RISCV_ALTERNATIVE_EARLY will issue sbi_ecall() very early in the boot process, before the first memory mapping is setup so we can't have any instrumentation happening here. 
In addition, when the kernel is relocatable, we must also not issue any relocation this early since they would have been patched virtually only. So, instead of disabling instrumentation for the whole kernel/sbi.c file and compiling it with -fno-pie, simply move __sbi_ecall() and __sbi_base_ecall() into their own file where this is fixed. Reported-by: Conor Dooley Closes: https://lore.kernel.org/linux-riscv/20240813-pony-truck-3e7a83e9759e@spud/ Reported-by: syzbot+cfbcb82adf6d7279fd35@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-riscv/00000000000065062c061fcec37b@google.com/ Fixes: 1745cfafebdf ("riscv: don't use global static vars to store alternative data") Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20240829165048.49756-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/include/asm/sbi.h | 20 ++++++++++- arch/riscv/kernel/Makefile | 6 +++- arch/riscv/kernel/sbi.c | 63 ----------------------------------- arch/riscv/kernel/sbi_ecall.c | 48 ++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 65 deletions(-) create mode 100644 arch/riscv/kernel/sbi_ecall.c diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 7cffd4ffecd0..7bd3746028c9 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -9,6 +9,7 @@ #include #include +#include #ifdef CONFIG_RISCV_SBI enum sbi_ext_id { @@ -304,6 +305,7 @@ struct sbiret { }; void sbi_init(void); +long __sbi_base_ecall(int fid); struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5, @@ -373,7 +375,23 @@ static inline unsigned long sbi_mk_version(unsigned long major, | (minor & SBI_SPEC_VERSION_MINOR_MASK); } -int sbi_err_map_linux_errno(int err); +static inline int sbi_err_map_linux_errno(int err) +{ + switch (err) { + case SBI_SUCCESS: + return 0; + case SBI_ERR_DENIED: + return -EPERM; + case SBI_ERR_INVALID_PARAM: + return -EINVAL; + case SBI_ERR_INVALID_ADDRESS: + return -EFAULT; + case SBI_ERR_NOT_SUPPORTED: + case SBI_ERR_FAILURE: + default: + return -ENOTSUPP; + }; +} extern bool sbi_debug_console_available; int sbi_debug_console_write(const char *bytes, unsigned int num_bytes); diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 5b243d46f4b1..1d71002e4f7b 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -20,17 +20,21 @@ endif ifdef CONFIG_RISCV_ALTERNATIVE_EARLY CFLAGS_alternative.o := -mcmodel=medany CFLAGS_cpufeature.o := -mcmodel=medany +CFLAGS_sbi_ecall.o := -mcmodel=medany ifdef CONFIG_FTRACE CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_sbi_ecall.o = $(CC_FLAGS_FTRACE) endif ifdef CONFIG_RELOCATABLE CFLAGS_alternative.o += -fno-pie CFLAGS_cpufeature.o += -fno-pie +CFLAGS_sbi_ecall.o += -fno-pie endif ifdef CONFIG_KASAN KASAN_SANITIZE_alternative.o := n KASAN_SANITIZE_cpufeature.o := n +KASAN_SANITIZE_sbi_ecall.o := n endif endif @@ -86,7 +90,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o -obj-$(CONFIG_RISCV_SBI) += sbi.o +obj-$(CONFIG_RISCV_SBI) += sbi.o sbi_ecall.o ifeq ($(CONFIG_RISCV_SBI), y) obj-$(CONFIG_SMP) += sbi-ipi.o obj-$(CONFIG_SMP) += cpu_ops_sbi.o diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 837bdab2601b..1989b8cade1b 100644 --- a/arch/riscv/kernel/sbi.c +++ 
b/arch/riscv/kernel/sbi.c @@ -14,9 +14,6 @@ #include #include -#define CREATE_TRACE_POINTS -#include - /* default SBI version is 0.1 */ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); @@ -27,55 +24,6 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) __ro_after_init; -struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, - unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, - int fid, int ext) -{ - struct sbiret ret; - - trace_sbi_call(ext, fid); - - register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); - register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); - register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); - register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); - register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); - register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); - register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); - register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); - asm volatile ("ecall" - : "+r" (a0), "+r" (a1) - : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) - : "memory"); - ret.error = a0; - ret.value = a1; - - trace_sbi_return(ext, ret.error, ret.value); - - return ret; -} -EXPORT_SYMBOL(__sbi_ecall); - -int sbi_err_map_linux_errno(int err) -{ - switch (err) { - case SBI_SUCCESS: - return 0; - case SBI_ERR_DENIED: - return -EPERM; - case SBI_ERR_INVALID_PARAM: - return -EINVAL; - case SBI_ERR_INVALID_ADDRESS: - return -EFAULT; - case SBI_ERR_NOT_SUPPORTED: - case SBI_ERR_FAILURE: - default: - return -ENOTSUPP; - }; -} -EXPORT_SYMBOL(sbi_err_map_linux_errno); - #ifdef CONFIG_RISCV_SBI_V01 static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask) { @@ -535,17 +483,6 @@ long sbi_probe_extension(int extid) } EXPORT_SYMBOL(sbi_probe_extension); -static long __sbi_base_ecall(int fid) -{ - struct sbiret ret; - - ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0); - if (!ret.error) - return ret.value; - else - return sbi_err_map_linux_errno(ret.error); -} - static inline long sbi_get_spec_version(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION); diff --git a/arch/riscv/kernel/sbi_ecall.c b/arch/riscv/kernel/sbi_ecall.c new file mode 100644 index 000000000000..24aabb4fbde3 --- /dev/null +++ b/arch/riscv/kernel/sbi_ecall.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Rivos Inc. 
*/ + +#include +#define CREATE_TRACE_POINTS +#include + +long __sbi_base_ecall(int fid) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0); + if (!ret.error) + return ret.value; + else + return sbi_err_map_linux_errno(ret.error); +} +EXPORT_SYMBOL(__sbi_base_ecall); + +struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + int fid, int ext) +{ + struct sbiret ret; + + trace_sbi_call(ext, fid); + + register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); + register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); + register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); + register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); + register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); + register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); + register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); + register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); + asm volatile ("ecall" + : "+r" (a0), "+r" (a1) + : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) + : "memory"); + ret.error = a0; + ret.value = a1; + + trace_sbi_return(ext, ret.error, ret.value); + + return ret; +} +EXPORT_SYMBOL(__sbi_ecall); From 72a57a7b4f830f5ca9653af52f2ac2c818293177 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 3 Sep 2024 15:11:18 +0100 Subject: [PATCH 361/374] cifs: Fix zero_point init on inode initialisation [ Upstream commit 517b58c1f9242a6b4ac9443d95569dee58bf6b8b ] Fix cifs_fattr_to_inode() such that the ->zero_point tracking variable is initialised when the inode is initialised. Fixes: 3ee1a1fc3981 ("cifs: Cut over to using netfslib") Signed-off-by: David Howells Reviewed-by: Paulo Alcantara (Red Hat) cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/inode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index dd0afa23734c..73e2e6c230b7 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -172,6 +172,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, CIFS_I(inode)->time = 0; /* force reval */ return -ESTALE; } + if (inode->i_state & I_NEW) + CIFS_I(inode)->netfs.zero_point = fattr->cf_eof; cifs_revalidate_cache(inode, fattr); From 39517a901b8f8e8298d62fed73b0ebef1203f59a Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 3 Sep 2024 11:15:41 +0100 Subject: [PATCH 362/374] cifs: Fix SMB1 readv/writev callback in the same way as SMB2/3 [ Upstream commit a68c74865f517e26728735aba0ae05055eaff76c ] Port a number of SMB2/3 async readv/writev fixes to the SMB1 transport: commit a88d60903696c01de577558080ec4fc738a70475 cifs: Don't advance the I/O iterator before terminating subrequest commit ce5291e56081730ec7d87bc9aa41f3de73ff3256 cifs: Defer read completion commit 1da29f2c39b67b846b74205c81bf0ccd96d34727 netfs, cifs: Fix handling of short DIO read Fixes: 3ee1a1fc3981 ("cifs: Cut over to using netfslib") Signed-off-by: David Howells Reported-by: Steve French Reviewed-by: Paulo Alcantara cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/cifssmb.c | 54 +++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 
6dce70f17208..cfae2e918209 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -1261,16 +1261,32 @@ CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock, return rc; } +static void cifs_readv_worker(struct work_struct *work) +{ + struct cifs_io_subrequest *rdata = + container_of(work, struct cifs_io_subrequest, subreq.work); + + netfs_subreq_terminated(&rdata->subreq, + (rdata->result == 0 || rdata->result == -EAGAIN) ? + rdata->got_bytes : rdata->result, true); +} + static void cifs_readv_callback(struct mid_q_entry *mid) { struct cifs_io_subrequest *rdata = mid->callback_data; + struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode); struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2, .rq_iter = rdata->subreq.io_iter }; - struct cifs_credits credits = { .value = 1, .instance = 0 }; + struct cifs_credits credits = { + .value = 1, + .instance = 0, + .rreq_debug_id = rdata->rreq->debug_id, + .rreq_debug_index = rdata->subreq.debug_index, + }; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n", __func__, mid->mid, mid->mid_state, rdata->result, @@ -1282,6 +1298,7 @@ cifs_readv_callback(struct mid_q_entry *mid) if (server->sign) { int rc = 0; + iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); rc = cifs_verify_signature(&rqst, server, mid->sequence_number); if (rc) @@ -1306,13 +1323,21 @@ cifs_readv_callback(struct mid_q_entry *mid) rdata->result = -EIO; } - if (rdata->result == 0 || rdata->result == -EAGAIN) - iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes); + if (rdata->result == -ENODATA) { + __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); + rdata->result = 0; + } else { + if (rdata->got_bytes < rdata->actual_len && + rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes == + ictx->remote_i_size) { + __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); + rdata->result = 0; + } + } + rdata->credits.value = 0; - netfs_subreq_terminated(&rdata->subreq, - (rdata->result == 0 || rdata->result == -EAGAIN) ? 
- rdata->got_bytes : rdata->result, - false); + INIT_WORK(&rdata->subreq.work, cifs_readv_worker); + queue_work(cifsiod_wq, &rdata->subreq.work); release_mid(mid); add_credits(server, &credits, 0); } @@ -1619,9 +1644,15 @@ static void cifs_writev_callback(struct mid_q_entry *mid) { struct cifs_io_subrequest *wdata = mid->callback_data; + struct TCP_Server_Info *server = wdata->server; struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; - struct cifs_credits credits = { .value = 1, .instance = 0 }; + struct cifs_credits credits = { + .value = 1, + .instance = 0, + .rreq_debug_id = wdata->rreq->debug_id, + .rreq_debug_index = wdata->subreq.debug_index, + }; ssize_t result; size_t written; @@ -1657,9 +1688,16 @@ cifs_writev_callback(struct mid_q_entry *mid) break; } + trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, + wdata->credits.value, + server->credits, server->in_flight, + 0, cifs_trace_rw_credits_write_response_clear); wdata->credits.value = 0; cifs_write_subrequest_terminated(wdata, result, true); release_mid(mid); + trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0, + server->credits, server->in_flight, + credits.value, cifs_trace_rw_credits_write_response_add); add_credits(tcon->ses->server, &credits, 0); } From 1be0c309a33cdb01e8ec7058d6b5b5fbe01b8d89 Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Mon, 3 Jun 2024 20:56:59 +0800 Subject: [PATCH 363/374] nvme: rename nvme_sc_to_pr_err to nvme_status_to_pr_err [ Upstream commit 22f19a584d7045e0509f103dbc5c0acfd6415163 ] This should better match its semantic. "sc" is used in the NVMe spec to specifically refer to the last 8 bits in the status field. We should not reuse "sc" here. Signed-off-by: Weiwen Hu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Stable-dep-of: 899d2e5a4e3d ("nvmet: Identify-Active Namespace ID List command should reject invalid nsid") Signed-off-by: Sasha Levin --- drivers/nvme/host/pr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index 8fa1ffcdaed4..a6db5edfab03 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -72,12 +72,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, return nvme_submit_sync_cmd(ns->queue, c, data, data_len); } -static int nvme_sc_to_pr_err(int nvme_sc) +static int nvme_status_to_pr_err(int status) { - if (nvme_is_path_error(nvme_sc)) + if (nvme_is_path_error(status)) return PR_STS_PATH_FAILED; - switch (nvme_sc & 0x7ff) { + switch (status & 0x7ff) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: @@ -121,7 +121,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10, if (ret < 0) return ret; - return nvme_sc_to_pr_err(ret); + return nvme_status_to_pr_err(ret); } static int nvme_pr_register(struct block_device *bdev, u64 old, @@ -196,7 +196,7 @@ static int nvme_pr_resv_report(struct block_device *bdev, void *data, if (ret < 0) return ret; - return nvme_sc_to_pr_err(ret); + return nvme_status_to_pr_err(ret); } static int nvme_pr_read_keys(struct block_device *bdev, From d433e595f1ac5783adff82adbc9219983f20df37 Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Mon, 3 Jun 2024 20:57:00 +0800 Subject: [PATCH 364/374] nvme: fix status magic numbers [ Upstream commit d89a5c6705998ddc42b104f8eabd3c4b9e8fde08 ] Replaced some magic numbers about SC and SCT with 
enum and macro. Signed-off-by: Weiwen Hu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Stable-dep-of: 899d2e5a4e3d ("nvmet: Identify-Active Namespace ID List command should reject invalid nsid") Signed-off-by: Sasha Levin --- drivers/nvme/host/constants.c | 2 +- drivers/nvme/host/core.c | 18 +++++++++--------- drivers/nvme/host/multipath.c | 2 +- drivers/nvme/host/nvme.h | 4 ++-- drivers/nvme/host/pr.c | 2 +- include/linux/nvme.h | 14 ++++++++++++-- 6 files changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 6f2ebb5fcdb0..2b9e6cfaf2a8 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = { const char *nvme_get_error_status_str(u16 status) { - status &= 0x7ff; + status &= NVME_SCT_SC_MASK; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) return nvme_statuses[status]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d973d063bbf5..431f98f45388 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -261,7 +261,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -329,8 +329,8 @@ static void nvme_log_error(struct request *req) nvme_sect_to_lba(ns->head, blk_rq_pos(req)), blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : ""); return; @@ -341,8 +341,8 @@ static void nvme_log_error(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : ""); } @@ -359,8 +359,8 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : "", nr->cmd->common.cdw10, @@ -388,7 +388,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) + if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; if (req->cmd_flags & REQ_NVME_MPATH) { @@ -1224,7 +1224,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: - * + * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. 
*/ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d8b6b4648eaf..03a6868f4dbc 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -83,7 +83,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - u16 status = nvme_req(req)->status & 0x7ff; + u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK; unsigned long flags; struct bio *bio; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 68b400f9c42d..2b35304e520d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -689,7 +689,7 @@ static inline u32 nvme_bytes_to_numd(size_t len) static inline bool nvme_is_ana_error(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: case NVME_SC_ANA_PERSISTENT_LOSS: @@ -702,7 +702,7 @@ static inline bool nvme_is_ana_error(u16 status) static inline bool nvme_is_path_error(u16 status) { /* check for a status code type of 'path related status' */ - return (status & 0x700) == 0x300; + return (status & NVME_SCT_MASK) == NVME_SCT_PATH; } /* diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index a6db5edfab03..7347ddf85f00 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -77,7 +77,7 @@ static int nvme_status_to_pr_err(int status) if (nvme_is_path_error(status)) return PR_STS_PATH_FAILED; - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c693ac344ec0..ed0d668e77c5 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1848,6 +1848,7 @@ enum { /* * Generic Command Status: */ + NVME_SCT_GENERIC = 0x0, NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -1895,6 +1896,7 @@ enum { /* * Command Specific Status: */ + NVME_SCT_COMMAND_SPECIFIC = 0x100, NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -1968,6 +1970,7 @@ enum { /* * Media and Data Integrity Errors: */ + NVME_SCT_MEDIA_ERROR = 0x200, NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -1980,6 +1983,7 @@ enum { /* * Path-related Errors: */ + NVME_SCT_PATH = 0x300, NVME_SC_INTERNAL_PATH_ERROR = 0x300, NVME_SC_ANA_PERSISTENT_LOSS = 0x301, NVME_SC_ANA_INACCESSIBLE = 0x302, @@ -1988,11 +1992,17 @@ enum { NVME_SC_HOST_PATH_ERROR = 0x370, NVME_SC_HOST_ABORTED_CMD = 0x371, - NVME_SC_CRD = 0x1800, + NVME_SC_MASK = 0x00ff, /* Status Code */ + NVME_SCT_MASK = 0x0700, /* Status Code Type */ + NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK, + + NVME_SC_CRD = 0x1800, /* Command Retry Delayed */ NVME_SC_MORE = 0x2000, - NVME_SC_DNR = 0x4000, + NVME_SC_DNR = 0x4000, /* Do Not Retry */ }; +#define NVME_SCT(status) ((status) >> 8 & 7) + struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: From 2443c10be369aae4e4dadc0fb5fc80b809d28dd1 Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Mon, 3 Jun 2024 20:57:01 +0800 Subject: [PATCH 365/374] nvme: rename CDR/MORE/DNR to NVME_STATUS_* [ Upstream commit dd0b0a4a2c5d7209457dc172997d1243ad269cfa ] CDR/MORE/DNR fields are not belonging to SC in the NVMe spec, rename them to NVME_STATUS_* to avoid confusion. 
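As an illustrative aside (the helper below is a placeholder, not a kernel API): with the masks introduced in the previous patch and the renamed NVME_STATUS_* bits, a driver-internal status word (phase bit already stripped) decomposes as follows.

/* Illustrative decode only -- not part of the patch. */
static inline void nvme_status_decode_sketch(u16 status)
{
	u16 sc    = status & NVME_SC_MASK;            /* bits  7:0  Status Code */
	u16 sct   = NVME_SCT(status);                 /* bits 10:8  Status Code Type */
	u16 crd   = (status & NVME_STATUS_CRD) >> 11; /* bits 12:11 Command Retry Delay index */
	bool more = status & NVME_STATUS_MORE;        /* bit  13    more info in the error log */
	bool dnr  = status & NVME_STATUS_DNR;         /* bit  14    Do Not Retry */

	(void)sc; (void)sct; (void)crd; (void)more; (void)dnr;
}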
Signed-off-by: Weiwen Hu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Stable-dep-of: 899d2e5a4e3d ("nvmet: Identify-Active Namespace ID List command should reject invalid nsid") Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 22 ++++++------ drivers/nvme/host/fabrics.c | 10 +++--- drivers/nvme/host/fault_inject.c | 2 +- drivers/nvme/host/fc.c | 6 ++-- drivers/nvme/host/nvme.h | 2 +- drivers/nvme/target/admin-cmd.c | 24 +++++++------- drivers/nvme/target/core.c | 46 +++++++++++++------------- drivers/nvme/target/discovery.c | 14 ++++---- drivers/nvme/target/fabrics-cmd-auth.c | 16 ++++----- drivers/nvme/target/fabrics-cmd.c | 36 ++++++++++---------- drivers/nvme/target/io-cmd-bdev.c | 12 +++---- drivers/nvme/target/passthru.c | 10 +++--- drivers/nvme/target/rdma.c | 10 +++--- drivers/nvme/target/tcp.c | 4 +-- drivers/nvme/target/zns.c | 30 ++++++++--------- include/linux/nvme.h | 6 ++-- 16 files changed, 125 insertions(+), 125 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 431f98f45388..5569cf4183b2 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req) u16 crd; /* The mask and shift result must be <= 3 */ - crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; @@ -331,8 +331,8 @@ static void nvme_log_error(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); return; } @@ -343,8 +343,8 @@ static void nvme_log_error(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); } static void nvme_log_err_passthru(struct request *req) @@ -361,8 +361,8 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : "", + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? 
"DNR " : "", nr->cmd->common.cdw10, nr->cmd->common.cdw11, nr->cmd->common.cdw12, @@ -384,7 +384,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return COMPLETE; if (blk_noretry_request(req) || - (nvme_req(req)->status & NVME_SC_DNR) || + (nvme_req(req)->status & NVME_STATUS_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; @@ -3887,7 +3887,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { - int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, @@ -3903,7 +3903,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) * * TODO: we should probably schedule a delayed retry here. */ - if (ret > 0 && (ret & NVME_SC_DNR)) + if (ret > 0 && (ret & NVME_STATUS_DNR)) nvme_ns_remove(ns); } @@ -4095,7 +4095,7 @@ static void nvme_scan_work(struct work_struct *work) * they report) but don't actually support it. */ ret = nvme_scan_ns_list(ctrl); - if (ret > 0 && ret & NVME_SC_DNR) + if (ret > 0 && ret & NVME_STATUS_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index ceb9c0ed3120..b5a4b5fd573e 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } @@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read64); @@ -275,7 +275,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? 
ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_write32); @@ -295,7 +295,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & ~NVME_SC_DNR; + int err_sctype = errval & ~NVME_STATUS_DNR; if (errval < 0) { dev_err(ctrl->device, @@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); */ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) return false; if (status == -EKEYREJECTED) diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 1ba10a5c656d..1d1b6441a339 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req) /* inject status code and DNR bit */ status = fault_inject->status; if (fault_inject->dont_retry) - status |= NVME_SC_DNR; + status |= NVME_STATUS_DNR; nvme_req(req)->status = status; } } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f0b081332749..beaad6576a67 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { if (portptr->port_state == FC_OBJSTATE_ONLINE) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: reconnect failure\n", ctrl->cnum); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 2b35304e520d..5e66bcb34d53 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -877,7 +877,7 @@ enum { NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), /* Set BLK_MQ_REQ_RESERVED when allocating request */ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), - /* Retry command when NVME_SC_DNR is not set in the result */ + /* Retry command when NVME_STATUS_DNR is not set in the result */ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), }; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index f5b7054a4a05..f7e1156ac7ec 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) @@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == 
NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req) if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; out: nvmet_req_complete(req, status); @@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) pr_debug("unhandled identify cns %d on qid %d\n", req->cmd->identify.cns, req->sq->qid); req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } /* @@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) if (val32 & ~mask) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); @@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req) ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; if (ncqr == 0xffff || nsqr == 0xffff) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } nvmet_set_result(req, @@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req) status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: - status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req) mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR); return; } ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; @@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 4ff460ba2826..c0973810e6af 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -55,18 +55,18 @@ inline u16 errno_to_nvme_status(struct 
nvmet_req *req, int errno) return NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); - return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case -ENODATA: @@ -76,7 +76,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; } } @@ -86,7 +86,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req) req->sq->qid); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, @@ -97,7 +97,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -106,7 +106,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -115,7 +115,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) { if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -145,7 +145,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); @@ -444,7 +444,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req) req->error_loc = offsetof(struct nvme_common_command, nsid); if (nvmet_subsys_nsid_exists(subsys, nsid)) return NVME_SC_INTERNAL_PATH_ERROR; - return NVME_SC_INVALID_NS | NVME_SC_DNR; + return NVME_SC_INVALID_NS | NVME_STATUS_DNR; } percpu_ref_get(&req->ns->ref); @@ -904,7 +904,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return nvmet_parse_fabrics_io_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) @@ -967,7 +967,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, /* no support for fused commands yet */ if (unlikely(flags & 
(NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -978,7 +978,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -996,7 +996,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, trace_nvmet_req_init(req, req->cmd); if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -1023,7 +1023,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1035,7 +1035,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1304,18 +1304,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req) if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got cmd %d while CC.EN == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!nvmet_check_auth_status(req))) { pr_warn("qid %d not authenticated\n", req->sq->qid); - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; } return 0; } @@ -1389,7 +1389,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, int ret; u16 status; - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", @@ -1405,7 +1405,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, hostnqn, subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); - status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } @@ -1456,7 +1456,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { - status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; goto out_free_sqs; } ctrl->cntlid = ret; diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index ce54da8c6b36..28843df5fa7c 100644 --- a/drivers/nvme/target/discovery.c +++ 
b/drivers/nvme/target/discovery.c @@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (offset & 0x3) { req->error_loc = offsetof(struct nvme_get_log_page_command, lpo); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { req->error_loc = offsetof(struct nvme_identify, cns); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } switch (cmd->common.opcode) { @@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) default: pr_debug("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index cb34d644ed08..3f2857c17d95 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req) u8 dhchap_status; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, secp); goto done; } if (req->cmd->auth_send.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp0); goto done; } if (req->cmd->auth_send.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp1); goto done; } tl = le32_to_cpu(req->cmd->auth_send.tl); if (!tl) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, tl); goto done; @@ -437,26 +437,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) u16 status = 0; if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 
req->error_loc = offsetof(struct nvmf_auth_receive_command, secp); goto done; } if (req->cmd->auth_receive.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp0); goto done; } if (req->cmd->auth_receive.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } al = le32_to_cpu(req->cmd->auth_receive.al); if (!al) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, al); goto done; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 69d77d34bec1..c4b2eddd5666 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvmf_property_set_command, offset); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } out: nvmet_req_complete(req, status); @@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->cap; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } else { @@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->csts; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } @@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto err; } if (ctrl->sqs[qid] != NULL) { pr_warn("qid %u has already been created\n", qid); req->error_loc = offsetof(struct nvmf_connect_command, qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } /* for fabrics, this value applies to only the I/O Submission Queues */ @@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = 
IPO_IATTR_CONNECT_SQE(sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; } old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { pr_warn("queue already connected!\n"); req->error_loc = offsetof(struct nvmf_connect_command, opcode); - return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } /* note: convert queue size from 0's-based value to 1's-based value */ @@ -230,14 +230,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); req->error_loc = offsetof(struct nvmf_connect_command, recfmt); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } @@ -257,7 +257,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) dhchap_status); nvmet_ctrl_put(ctrl); if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) - status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); + status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR); else status = NVME_SC_INTERNAL; goto out; @@ -305,7 +305,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } @@ -314,13 +314,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req); if (!ctrl) { - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto out; } if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } @@ -350,13 +350,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req) pr_debug("invalid command 0x%x on unconnected queue.\n", cmd->fabrics.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { pr_debug("invalid capsule type 0x%x on unconnected queue.\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->connect.qid == 0) diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 6426aac2634a..e511b055ece7 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -135,11 +135,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) */ switch (blk_sts) { case BLK_STS_NOSPC: - status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET: - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | 
NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: @@ -147,10 +147,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; break; default: - status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case BLK_STS_MEDIUM: @@ -159,7 +159,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) break; case BLK_STS_IOERR: default: - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); } @@ -356,7 +356,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req) return 0; if (blkdev_issue_flush(req->ns->bdev)) - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; return 0; } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index f003782d4ecf..24d0e2418d2e 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) ns = nvme_find_get_ns(ctrl, nsid); if (unlikely(!ns)) { pr_err("failed to get passthru ns nsid:%u\n", nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) * emulated in the future if regular targets grow support for * this feature. */ - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return nvmet_setup_passthru_command(req); @@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) case NVME_FEAT_RESV_PERSIST: /* No reservations, see nvmet_parse_passthru_io_cmd() */ default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } @@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; case NVME_ID_CNS_NS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; @@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; default: return nvmet_setup_passthru_command(req); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 689bb5d3cfdc..498b3ca59651 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; } /* no data command? 
*/ @@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { @@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index ebf25819a7da..45b46c55681f 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; if (len > cmd->req.port->inline_data_size) - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 0021d06041c1..af9e13be7678 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) } if (!bdev_is_zoned(req->ns->bdev)) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_identify, nsid); goto out; } @@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; } if (out_bufsize < sizeof(struct nvme_zone_report)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.pr) { @@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.zrasf) { @@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return 
NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) return NVME_SC_SUCCESS; case -EINVAL: case -EIO: - return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; default: return NVME_SC_INTERNAL; } @@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) default: /* this is needed to quiet compiler warning */ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (op == REQ_OP_LAST) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; goto out; } @@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (sect >= get_capacity(bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (zone_sectors - 1)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) } if (total_len != nvmet_rw_data_len(req)) { - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out_put_bio; } diff --git a/include/linux/nvme.h b/include/linux/nvme.h index ed0d668e77c5..efda407622c1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1996,9 +1996,9 @@ enum { NVME_SCT_MASK = 0x0700, /* Status Code Type */ NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK, - NVME_SC_CRD = 0x1800, /* Command Retry Delayed */ - NVME_SC_MORE = 0x2000, - NVME_SC_DNR = 0x4000, /* Do Not Retry */ + NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */ + NVME_STATUS_MORE = 0x2000, + NVME_STATUS_DNR = 0x4000, /* Do Not Retry */ }; #define NVME_SCT(status) ((status) >> 8 & 7) From 5ac3d0a487d166b6cbb0494644cabb5ef58d12b3 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Thu, 29 Aug 2024 11:52:14 +0200 Subject: [PATCH 366/374] nvmet: Identify-Active Namespace ID List command should reject invalid nsid [ Upstream commit 899d2e5a4e3d36689e8938e152f4b69a4bcc6b4d ] nsid values of 0xFFFFFFFE and 0XFFFFFFFF should be rejected with a status code of "Invalid Namespace or Format". See NVMe Base Specification, Active Namespace ID list (CNS 02h). 
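For readers following along without the diff applied, here is a minimal, self-contained sketch of the range check this patch introduces. The helper name is hypothetical; the real change is made inline in nvmet_execute_identify_nslist(), as the hunk further below shows:

    /* Hypothetical standalone form of the nsid validation added by this patch. */
    static bool nvmet_nslist_min_nsid_valid(u32 min_nsid)
    {
            /* 0xFFFFFFFE and 0xFFFFFFFF (NVME_NSID_ALL) are reserved values. */
            return min_nsid < 0xFFFFFFFE;
    }
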
Fixes: a07b4970f464 ("nvmet: add a generic NVMe target") Signed-off-by: Maurizio Lombardi Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/target/admin-cmd.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index f7e1156ac7ec..85006b2df8ae 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -587,6 +587,16 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req) u16 status = 0; int i = 0; + /* + * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid + * See NVMe Base Specification, Active Namespace ID list (CNS 02h). + */ + if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) { + req->error_loc = offsetof(struct nvme_identify, nsid); + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; + goto out; + } + list = kzalloc(buf_size, GFP_KERNEL); if (!list) { status = NVME_SC_INTERNAL; From 7c890ef60bf417d3fe5c6f7a9f6cef0e1d77f74f Mon Sep 17 00:00:00 2001 From: Li Nan Date: Wed, 4 Sep 2024 11:13:48 +0800 Subject: [PATCH 367/374] ublk_drv: fix NULL pointer dereference in ublk_ctrl_start_recovery() [ Upstream commit e58f5142f88320a5b1449f96a146f2f24615c5c7 ] When two UBLK_CMD_START_USER_RECOVERY commands are submitted, the first one sets 'ubq->ubq_daemon' to NULL, and the second one triggers WARN in ublk_queue_reinit() and subsequently a NULL pointer dereference issue. Fix it by adding the check in ublk_ctrl_start_recovery() and return immediately in case of zero 'ub->nr_queues_ready'. BUG: kernel NULL pointer dereference, address: 0000000000000028 RIP: 0010:ublk_ctrl_start_recovery.constprop.0+0x82/0x180 Call Trace: ? __die+0x20/0x70 ? page_fault_oops+0x75/0x170 ? exc_page_fault+0x64/0x140 ? asm_exc_page_fault+0x22/0x30 ? ublk_ctrl_start_recovery.constprop.0+0x82/0x180 ublk_ctrl_uring_cmd+0x4f7/0x6c0 ? pick_next_task_idle+0x26/0x40 io_uring_cmd+0x9a/0x1b0 io_issue_sqe+0x193/0x3f0 io_wq_submit_work+0x9b/0x390 io_worker_handle_work+0x165/0x360 io_wq_worker+0xcb/0x2f0 ? finish_task_switch.isra.0+0x203/0x290 ? finish_task_switch.isra.0+0x203/0x290 ? __pfx_io_wq_worker+0x10/0x10 ret_from_fork+0x2d/0x50 ? __pfx_io_wq_worker+0x10/0x10 ret_from_fork_asm+0x1a/0x30 Fixes: c732a852b419 ("ublk_drv: add START_USER_RECOVERY and END_USER_RECOVERY support") Reported-and-tested-by: Changhui Zhong Closes: https://lore.kernel.org/all/CAGVVp+UvLiS+bhNXV-h2icwX1dyybbYHeQUuH7RYqUvMQf6N3w@mail.gmail.com Reviewed-by: Ming Lei Signed-off-by: Li Nan Link: https://lore.kernel.org/r/20240904031348.4139545-1-ming.lei@redhat.com Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/block/ublk_drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 3b5883932133..fc001e9f95f6 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2664,6 +2664,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub, mutex_lock(&ub->mutex); if (!ublk_can_use_recovery(ub)) goto out_unlock; + if (!ub->nr_queues_ready) + goto out_unlock; /* * START_RECOVERY is only allowd after: * From b4e9331e9064ce88fd430d98bae5cc347bf33ef2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 6 Aug 2024 20:48:43 +0200 Subject: [PATCH 368/374] x86/mm: Fix PTI for i386 some more commit c48b5a4cf3125adb679e28ef093f66ff81368d05 upstream. 
So it turns out that we have to do two passes of pti_clone_entry_text(), once before initcalls, such that device and late initcalls can use user-mode-helper / modprobe and once after free_initmem() / mark_readonly(). Now obviously mark_readonly() can cause PMD splits, and pti_clone_pgtable() doesn't like that much. Allow the late clone to split PMDs so that pagetables stay in sync. [peterz: Changelog and comments] Reported-by: Guenter Roeck Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Tested-by: Guenter Roeck Link: https://lkml.kernel.org/r/20240806184843.GX37996@noisy.programming.kicks-ass.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index bfdf5f45b137..851ec8f1363a 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) * * Returns a pointer to a PTE on success, or NULL on failure. */ -static pte_t *pti_user_pagetable_walk_pte(unsigned long address) +static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); pmd_t *pmd; @@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address) if (!pmd) return NULL; - /* We can't do anything sensible if we hit a large mapping. */ + /* Large PMD mapping found */ if (pmd_leaf(*pmd)) { - WARN_ON(1); - return NULL; + /* Clear the PMD if we hit a large mapping from the first round */ + if (late_text) { + set_pmd(pmd, __pmd(0)); + } else { + WARN_ON_ONCE(1); + return NULL; + } } if (pmd_none(*pmd)) { @@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void) if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) return; - target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false); if (WARN_ON(!target_pte)) return; @@ -301,7 +306,7 @@ enum pti_clone_level { static void pti_clone_pgtable(unsigned long start, unsigned long end, - enum pti_clone_level level) + enum pti_clone_level level, bool late_text) { unsigned long addr; @@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, return; /* Allocate PTE in the user page-table */ - target_pte = pti_user_pagetable_walk_pte(addr); + target_pte = pti_user_pagetable_walk_pte(addr, late_text); if (WARN_ON(!target_pte)) return; @@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void) phys_addr_t pa = per_cpu_ptr_to_phys((void *)va); pte_t *target_pte; - target_pte = pti_user_pagetable_walk_pte(va); + target_pte = pti_user_pagetable_walk_pte(va, false); if (WARN_ON(!target_pte)) return; @@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void) start = CPU_ENTRY_AREA_BASE; end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES); - pti_clone_pgtable(start, end, PTI_CLONE_PMD); + pti_clone_pgtable(start, end, PTI_CLONE_PMD, false); } #endif /* CONFIG_X86_64 */ @@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void) /* * Clone the populated PMDs of the entry text and force it RO. 
*/ -static void pti_clone_entry_text(void) +static void pti_clone_entry_text(bool late) { pti_clone_pgtable((unsigned long) __entry_text_start, (unsigned long) __entry_text_end, - PTI_LEVEL_KERNEL_IMAGE); + PTI_LEVEL_KERNEL_IMAGE, late); } /* @@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void) * pti_set_kernel_image_nonglobal() did to clear the * global bit. */ - pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE); + pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false); /* * pti_clone_pgtable() will set the global bit in any PMDs @@ -638,8 +643,15 @@ void __init pti_init(void) /* Undo all global bits from the init pagetables in head_64.S: */ pti_set_kernel_image_nonglobal(); + /* Replace some of the global bits just for shared entry text: */ - pti_clone_entry_text(); + /* + * This is very early in boot. Device and Late initcalls can do + * modprobe before free_initmem() and mark_readonly(). This + * pti_clone_entry_text() allows those user-mode-helpers to function, + * but notably the text is still RW. + */ + pti_clone_entry_text(false); pti_setup_espfix64(); pti_setup_vsyscall(); } @@ -656,10 +668,11 @@ void pti_finalize(void) if (!boot_cpu_has(X86_FEATURE_PTI)) return; /* - * We need to clone everything (again) that maps parts of the - * kernel image. + * This is after free_initmem() (all initcalls are done) and we've done + * mark_readonly(). Text is now NX which might've split some PMDs + * relative to the early clone. */ - pti_clone_entry_text(); + pti_clone_entry_text(true); pti_clone_kernel_text(); debug_checkwx_user(); From 48cb63ce38b980221aa6a357188b80fe1a64caf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jouni=20H=C3=B6gander?= Date: Mon, 2 Sep 2024 09:42:40 +0300 Subject: [PATCH 369/374] drm/i915/display: Add mechanism to use sink model when applying quirk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 43cf50eb1408ccb99cab01521263e8cb4cfdc023 upstream. Currently there is no way to apply quirk on device only if certain panel model is installed. This patch implements such mechanism by adding new quirk type intel_dpcd_quirk which contains also sink_oui and sink_device_id fields and using also them to figure out if applying quirk is needed. New intel_init_dpcd_quirks is added and called after drm_dp_read_desc with proper sink device identity read from dpcdc. 
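As a hedged illustration (not the driver code itself) of what matching on sink identity amounts to, the sketch below uses illustrative struct and function names only; the real table and matcher live in intel_quirks.c in the diff that follows:

    /* Illustrative only: match a quirk entry on DP sink OUI and device id. */
    struct dpcd_quirk_sketch {
            u8 sink_oui[3];
            u8 sink_device_id[6];   /* all zeros means "match any device id" */
    };

    static bool dpcd_quirk_matches(const struct dpcd_quirk_sketch *q,
                                   const u8 oui[3], const u8 device_id[6])
    {
            if (memcmp(q->sink_oui, oui, 3) != 0)
                    return false;
            return !memchr_inv(q->sink_device_id, 0, 6) ||
                   !memcmp(q->sink_device_id, device_id, 6);
    }
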
v3: - !mem_is_zero fixed to mem_is_zero v2: - instead of using struct intel_quirk add new struct intel_dpcd_quirk Signed-off-by: Jouni Högander Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20240902064241.1020965-2-jouni.hogander@intel.com (cherry picked from commit b3b91369908ac63be6f64905448b8ba5cd151875) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- .../drm/i915/display/intel_display_types.h | 4 ++ drivers/gpu/drm/i915/display/intel_dp.c | 4 ++ drivers/gpu/drm/i915/display/intel_quirks.c | 51 +++++++++++++++++++ drivers/gpu/drm/i915/display/intel_quirks.h | 5 ++ 4 files changed, 64 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 6747c10da298..48c5809e01b5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1840,6 +1840,10 @@ struct intel_dp { unsigned long last_oui_write; bool colorimetry_support; + + struct { + unsigned long mask; + } quirks; }; enum lspcon_vendor { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8ea00c8ee74a..a7d91ca1d8ba 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -79,6 +79,7 @@ #include "intel_pch_display.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_quirks.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vrr.h" @@ -3941,6 +3942,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); + intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); /* * Read the eDP display control registers. 
@@ -4053,6 +4055,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); + intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); + intel_dp_update_sink_caps(intel_dp); } diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 14d5fefc9c5b..bce1f67c918b 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -14,6 +14,11 @@ static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id q display->quirks.mask |= BIT(quirk); } +static void intel_set_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk) +{ + intel_dp->quirks.mask |= BIT(quirk); +} + /* * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason */ @@ -72,6 +77,21 @@ struct intel_quirk { void (*hook)(struct intel_display *display); }; +struct intel_dpcd_quirk { + int device; + int subsystem_vendor; + int subsystem_device; + u8 sink_oui[3]; + u8 sink_device_id[6]; + void (*hook)(struct intel_dp *intel_dp); +}; + +#define SINK_OUI(first, second, third) { (first), (second), (third) } +#define SINK_DEVICE_ID(first, second, third, fourth, fifth, sixth) \ + { (first), (second), (third), (fourth), (fifth), (sixth) } + +#define SINK_DEVICE_ID_ANY SINK_DEVICE_ID(0, 0, 0, 0, 0, 0) + /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ struct intel_dmi_quirk { void (*hook)(struct intel_display *display); @@ -203,6 +223,9 @@ static struct intel_quirk intel_quirks[] = { { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, }; +static struct intel_dpcd_quirk intel_dpcd_quirks[] = { +}; + void intel_init_quirks(struct intel_display *display) { struct pci_dev *d = to_pci_dev(display->drm->dev); @@ -224,7 +247,35 @@ void intel_init_quirks(struct intel_display *display) } } +void intel_init_dpcd_quirks(struct intel_dp *intel_dp, + const struct drm_dp_dpcd_ident *ident) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct pci_dev *d = to_pci_dev(display->drm->dev); + int i; + + for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) { + struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i]; + + if (d->device == q->device && + (d->subsystem_vendor == q->subsystem_vendor || + q->subsystem_vendor == PCI_ANY_ID) && + (d->subsystem_device == q->subsystem_device || + q->subsystem_device == PCI_ANY_ID) && + !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) && + (!memcmp(q->sink_device_id, ident->device_id, + sizeof(ident->device_id)) || + mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id)))) + q->hook(intel_dp); + } +} + bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk) { return display->quirks.mask & BIT(quirk); } + +bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk) +{ + return intel_dp->quirks.mask & BIT(quirk); +} diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index 151c8f4ae576..c8db50b9ab74 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -9,6 +9,8 @@ #include struct intel_display; +struct intel_dp; +struct drm_dp_dpcd_ident; enum intel_quirk_id { QUIRK_BACKLIGHT_PRESENT, @@ -20,6 +22,9 @@ enum intel_quirk_id { }; void intel_init_quirks(struct intel_display *display); +void intel_init_dpcd_quirks(struct intel_dp *intel_dp, + const struct drm_dp_dpcd_ident *ident); bool intel_has_quirk(struct intel_display 
*display, enum intel_quirk_id quirk); +bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk); #endif /* __INTEL_QUIRKS_H__ */ From 6f574d0156890afd3761da2fa44b805c57f6a075 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jouni=20H=C3=B6gander?= Date: Mon, 2 Sep 2024 09:42:41 +0300 Subject: [PATCH 370/374] drm/i915/display: Increase Fast Wake Sync length as a quirk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a13494de53258d8cf82ed3bcd69176bbf7f2640e upstream. In commit "drm/i915/display: Increase number of fast wake precharge pulses" we were increasing Fast Wake sync pulse length to fix problems observed on Dell Precision 5490 laptop with AUO panel. Later we have observed this is causing problems on other panels. Fix these problems by increasing Fast Wake sync pulse length as a quirk applied for Dell Precision 5490 with problematic panel. Fixes: f77772866385 ("drm/i915/display: Increase number of fast wake precharge pulses") Cc: Ville Syrjälä Closes: http://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9739 Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2246 Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11762 Signed-off-by: Jouni Högander Reviewed-by: Jani Nikula Cc: # v6.10+ Link: https://patchwork.freedesktop.org/patch/msgid/20240902064241.1020965-3-jouni.hogander@intel.com (cherry picked from commit fcba2ed66b39252210f4e739722ebcc5398c2197) Requires: 43cf50eb1408 ("drm/i915/display: Add mechanism to use sink model when applying quirk") Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/display/intel_dp_aux.c | 16 +++++++++++----- drivers/gpu/drm/i915/display/intel_dp_aux.h | 2 +- drivers/gpu/drm/i915/display/intel_psr.c | 2 +- drivers/gpu/drm/i915/display/intel_quirks.c | 19 ++++++++++++++++++- drivers/gpu/drm/i915/display/intel_quirks.h | 1 + 5 files changed, 32 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index b8a53bb174da..be58185a77c0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -13,6 +13,7 @@ #include "intel_dp_aux.h" #include "intel_dp_aux_regs.h" #include "intel_pps.h" +#include "intel_quirks.h" #include "intel_tc.h" #define AUX_CH_NAME_BUFSIZE 6 @@ -142,16 +143,21 @@ static int intel_dp_aux_sync_len(void) return precharge + preamble; } -int intel_dp_aux_fw_sync_len(void) +int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp) { + int precharge = 10; /* 10-16 */ + int preamble = 8; + /* * We faced some glitches on Dell Precision 5490 MTL laptop with panel: * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20 * is fixing these problems with the panel. It is still within range - * mentioned in eDP specification. + * mentioned in eDP specification. Increasing Fast Wake sync length is + * causing problems with other panels: increase length as a quirk for + * this specific laptop. 
*/ - int precharge = 12; /* 10-16 */ - int preamble = 8; + if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN)) + precharge += 2; return precharge + preamble; } @@ -211,7 +217,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); if (intel_tc_port_in_tbt_alt_mode(dig_port)) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 76d1f2ed7c2f..593f58fafab7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -20,6 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); void intel_dp_aux_irq_handler(struct drm_i915_private *i915); u32 intel_dp_aux_pack(const u8 *src, int src_bytes); -int intel_dp_aux_fw_sync_len(void); +int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp); #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 3c7da862222b..7173ffc7c66c 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1356,7 +1356,7 @@ static bool _compute_alpm_params(struct intel_dp *intel_dp, int tfw_exit_latency = 20; /* eDP spec */ int phy_wake = 4; /* eDP spec */ int preamble = 8; /* eDP spec */ - int precharge = intel_dp_aux_fw_sync_len() - preamble; + int precharge = intel_dp_aux_fw_sync_len(intel_dp) - preamble; u8 max_wake_lines; io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) + diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index bce1f67c918b..dfd8b4960e6d 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -70,6 +70,14 @@ static void quirk_no_pps_backlight_power_hook(struct intel_display *display) drm_info(display->drm, "Applying no pps backlight power quirk\n"); } +static void quirk_fw_sync_len(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + + intel_set_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN); + drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -224,6 +232,15 @@ static struct intel_quirk intel_quirks[] = { }; static struct intel_dpcd_quirk intel_dpcd_quirks[] = { + /* Dell Precision 5490 */ + { + .device = 0x7d55, + .subsystem_vendor = 0x1028, + .subsystem_device = 0x0cc7, + .sink_oui = SINK_OUI(0x38, 0xec, 0x11), + .hook = quirk_fw_sync_len, + }, + }; void intel_init_quirks(struct intel_display *display) @@ -265,7 +282,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp, !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) && (!memcmp(q->sink_device_id, ident->device_id, sizeof(ident->device_id)) || - mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id)))) + !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id)))) q->hook(intel_dp); } } diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index c8db50b9ab74..cafdebda7535 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -19,6 +19,7 @@ enum intel_quirk_id { QUIRK_INVERT_BRIGHTNESS, QUIRK_LVDS_SSC_DISABLE, 
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, + QUIRK_FW_SYNC_LEN, }; void intel_init_quirks(struct intel_display *display); From 01681aa609b5f110502f56c4e3b2938efcf4a5bc Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 29 Aug 2024 18:25:49 +0100 Subject: [PATCH 371/374] btrfs: fix race between direct IO write and fsync when using same fd commit cd9253c23aedd61eb5ff11f37a36247cd46faf86 upstream. If we have 2 threads that are using the same file descriptor and one of them is doing direct IO writes while the other is doing fsync, we have a race where we can end up either: 1) Attempt a fsync without holding the inode's lock, triggering an assertion failures when assertions are enabled; 2) Do an invalid memory access from the fsync task because the file private points to memory allocated on stack by the direct IO task and it may be used by the fsync task after the stack was destroyed. The race happens like this: 1) A user space program opens a file descriptor with O_DIRECT; 2) The program spawns 2 threads using libpthread for example; 3) One of the threads uses the file descriptor to do direct IO writes, while the other calls fsync using the same file descriptor. 4) Call task A the thread doing direct IO writes and task B the thread doing fsyncs; 5) Task A does a direct IO write, and at btrfs_direct_write() sets the file's private to an on stack allocated private with the member 'fsync_skip_inode_lock' set to true; 6) Task B enters btrfs_sync_file() and sees that there's a private structure associated to the file which has 'fsync_skip_inode_lock' set to true, so it skips locking the inode's VFS lock; 7) Task A completes the direct IO write, and resets the file's private to NULL since it had no prior private and our private was stack allocated. Then it unlocks the inode's VFS lock; 8) Task B enters btrfs_get_ordered_extents_for_logging(), then the assertion that checks the inode's VFS lock is held fails, since task B never locked it and task A has already unlocked it. The stack trace produced is the following: assertion failed: inode_is_locked(&inode->vfs_inode), in fs/btrfs/ordered-data.c:983 ------------[ cut here ]------------ kernel BUG at fs/btrfs/ordered-data.c:983! Oops: invalid opcode: 0000 [#1] PREEMPT SMP PTI CPU: 9 PID: 5072 Comm: worker Tainted: G U OE 6.10.5-1-default #1 openSUSE Tumbleweed 69f48d427608e1c09e60ea24c6c55e2ca1b049e8 Hardware name: Acer Predator PH315-52/Covini_CFS, BIOS V1.12 07/28/2020 RIP: 0010:btrfs_get_ordered_extents_for_logging.cold+0x1f/0x42 [btrfs] Code: 50 d6 86 c0 e8 (...) RSP: 0018:ffff9e4a03dcfc78 EFLAGS: 00010246 RAX: 0000000000000054 RBX: ffff9078a9868e98 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff907dce4a7800 RDI: ffff907dce4a7800 RBP: ffff907805518800 R08: 0000000000000000 R09: ffff9e4a03dcfb38 R10: ffff9e4a03dcfb30 R11: 0000000000000003 R12: ffff907684ae7800 R13: 0000000000000001 R14: ffff90774646b600 R15: 0000000000000000 FS: 00007f04b96006c0(0000) GS:ffff907dce480000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f32acbfc000 CR3: 00000001fd4fa005 CR4: 00000000003726f0 Call Trace: ? __die_body.cold+0x14/0x24 ? die+0x2e/0x50 ? do_trap+0xca/0x110 ? do_error_trap+0x6a/0x90 ? btrfs_get_ordered_extents_for_logging.cold+0x1f/0x42 [btrfs bb26272d49b4cdc847cf3f7faadd459b62caee9a] ? exc_invalid_op+0x50/0x70 ? btrfs_get_ordered_extents_for_logging.cold+0x1f/0x42 [btrfs bb26272d49b4cdc847cf3f7faadd459b62caee9a] ? asm_exc_invalid_op+0x1a/0x20 ? 
btrfs_get_ordered_extents_for_logging.cold+0x1f/0x42 [btrfs bb26272d49b4cdc847cf3f7faadd459b62caee9a] ? btrfs_get_ordered_extents_for_logging.cold+0x1f/0x42 [btrfs bb26272d49b4cdc847cf3f7faadd459b62caee9a] btrfs_sync_file+0x21a/0x4d0 [btrfs bb26272d49b4cdc847cf3f7faadd459b62caee9a] ? __seccomp_filter+0x31d/0x4f0 __x64_sys_fdatasync+0x4f/0x90 do_syscall_64+0x82/0x160 ? do_futex+0xcb/0x190 ? __x64_sys_futex+0x10e/0x1d0 ? switch_fpu_return+0x4f/0xd0 ? syscall_exit_to_user_mode+0x72/0x220 ? do_syscall_64+0x8e/0x160 ? syscall_exit_to_user_mode+0x72/0x220 ? do_syscall_64+0x8e/0x160 ? syscall_exit_to_user_mode+0x72/0x220 ? do_syscall_64+0x8e/0x160 ? syscall_exit_to_user_mode+0x72/0x220 ? do_syscall_64+0x8e/0x160 entry_SYSCALL_64_after_hwframe+0x76/0x7e Another problem here is if task B grabs the private pointer and then uses it after task A has finished, since the private was allocated in the stack of task A, it results in some invalid memory access with a hard to predict result. This issue, triggering the assertion, was observed with QEMU workloads by two users in the Link tags below. Fix this by not relying on a file's private to pass information to fsync that it should skip locking the inode and instead pass this information through a special value stored in current->journal_info. This is safe because in the relevant section of the direct IO write path we are not holding a transaction handle, so current->journal_info is NULL. The following C program triggers the issue: $ cat repro.c /* Get the O_DIRECT definition. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include static int fd; static ssize_t do_write(int fd, const void *buf, size_t count, off_t offset) { while (count > 0) { ssize_t ret; ret = pwrite(fd, buf, count, offset); if (ret < 0) { if (errno == EINTR) continue; return ret; } count -= ret; buf += ret; } return 0; } static void *fsync_loop(void *arg) { while (1) { int ret; ret = fsync(fd); if (ret != 0) { perror("Fsync failed"); exit(6); } } } int main(int argc, char *argv[]) { long pagesize; void *write_buf; pthread_t fsyncer; int ret; if (argc != 2) { fprintf(stderr, "Use: %s \n", argv[0]); return 1; } fd = open(argv[1], O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666); if (fd == -1) { perror("Failed to open/create file"); return 1; } pagesize = sysconf(_SC_PAGE_SIZE); if (pagesize == -1) { perror("Failed to get page size"); return 2; } ret = posix_memalign(&write_buf, pagesize, pagesize); if (ret) { perror("Failed to allocate buffer"); return 3; } ret = pthread_create(&fsyncer, NULL, fsync_loop, NULL); if (ret != 0) { fprintf(stderr, "Failed to create writer thread: %d\n", ret); return 4; } while (1) { ret = do_write(fd, write_buf, pagesize, 0); if (ret != 0) { perror("Write failed"); exit(5); } } return 0; } $ mkfs.btrfs -f /dev/sdi $ mount /dev/sdi /mnt/sdi $ timeout 10 ./repro /mnt/sdi/foo Usually the race is triggered within less than 1 second. A test case for fstests will follow soon. 
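(The reproducer above uses POSIX threads, so it can be built with something like "gcc -O2 -o repro repro.c -pthread".) To summarise the mechanism of the fix before the diff: the direct IO write path publishes a per-task sentinel in current->journal_info, which is safe there because no transaction handle is held at that point. A hedged sketch, with only the sentinel name taken from the patch and the function name purely illustrative:

    #define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)

    /* Sketch of the hand-off on the direct IO completion side. */
    static ssize_t dio_write_complete_sketch(struct iomap_dio *dio)
    {
            ssize_t ret;

            /* No transaction handle is held here, so journal_info is free. */
            current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
            ret = iomap_dio_complete(dio);  /* may re-enter btrfs_sync_file() */
            current->journal_info = NULL;
            return ret;
    }
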
Reported-by: Paulo Dias Link: https://bugzilla.kernel.org/show_bug.cgi?id=219187 Reported-by: Andreas Jahn Link: https://bugzilla.kernel.org/show_bug.cgi?id=219199 Reported-by: syzbot+4704b3cc972bd76024f1@syzkaller.appspotmail.com Link: https://lore.kernel.org/linux-btrfs/00000000000044ff540620d7dee2@google.com/ Fixes: 939b656bc8ab ("btrfs: fix corruption after buffer fault in during direct IO append write") CC: stable@vger.kernel.org # 5.15+ Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/ctree.h | 1 - fs/btrfs/file.c | 25 ++++++++++--------------- fs/btrfs/transaction.h | 6 ++++++ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a56209d275c1..b2e4b30b8fae 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -457,7 +457,6 @@ struct btrfs_file_private { void *filldir_buf; u64 last_index; struct extent_state *llseek_cached_state; - bool fsync_skip_inode_lock; }; static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ca434f0cd27f..66dfee873906 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1558,13 +1558,6 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) if (IS_ERR_OR_NULL(dio)) { ret = PTR_ERR_OR_ZERO(dio); } else { - struct btrfs_file_private stack_private = { 0 }; - struct btrfs_file_private *private; - const bool have_private = (file->private_data != NULL); - - if (!have_private) - file->private_data = &stack_private; - /* * If we have a synchoronous write, we must make sure the fsync * triggered by the iomap_dio_complete() call below doesn't @@ -1573,13 +1566,10 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) * partial writes due to the input buffer (or parts of it) not * being already faulted in. */ - private = file->private_data; - private->fsync_skip_inode_lock = true; + ASSERT(current->journal_info == NULL); + current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB; ret = iomap_dio_complete(dio); - private->fsync_skip_inode_lock = false; - - if (!have_private) - file->private_data = NULL; + current->journal_info = NULL; } /* No increment (+=) because iomap returns a cumulative value. */ @@ -1811,7 +1801,6 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx) */ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { - struct btrfs_file_private *private = file->private_data; struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); @@ -1821,7 +1810,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) int ret = 0, err; u64 len; bool full_sync; - const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false); + bool skip_ilock = false; + + if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) { + skip_ilock = true; + current->journal_info = NULL; + lockdep_assert_held(&inode->i_rwsem); + } trace_btrfs_sync_file(file, datasync); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 4e451ab173b1..62ec85f4b777 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -27,6 +27,12 @@ struct btrfs_root_item; struct btrfs_root; struct btrfs_path; +/* + * Signal that a direct IO write is in progress, to avoid deadlock for sync + * direct IO writes when fsync is called during the direct IO write path. 
+ */ +#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1) + /* Radix-tree tag for roots that are part of the trasaction. */ #define BTRFS_ROOT_TRANS_TAG 0 From 12bc88ca29dec974f387fc8cd27c7257da83458c Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Thu, 5 Sep 2024 13:15:37 +0200 Subject: [PATCH 372/374] spi: spi-fsl-lpspi: Fix off-by-one in prescale max commit ff949d981c775332be94be70397ee1df20bc68e5 upstream. The commit 783bf5d09f86 ("spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register") doesn't implement the prescaler maximum as intended. The maximum allowed value for i.MX93 should be 1 and for i.MX7ULP it should be 7. So this needs also a adjustment of the comparison in the scldiv calculation. Fixes: 783bf5d09f86 ("spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register") Signed-off-by: Stefan Wahren Link: https://patch.msgid.link/20240905111537.90389-1-wahrenst@gmx.net Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-fsl-lpspi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 30d56f8775d7..d2f603e1014c 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -136,7 +136,7 @@ static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = { }; static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = { - .prescale_max = 8, + .prescale_max = 7, }; static const struct of_device_id fsl_lpspi_dt_ids[] = { @@ -336,7 +336,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) div = DIV_ROUND_UP(perclk_rate, config.speed_hz); - for (prescale = 0; prescale < prescale_max; prescale++) { + for (prescale = 0; prescale <= prescale_max; prescale++) { scldiv = div / (1 << prescale) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; From 60e01e9316a3832fe98e2fbc8171c2355b13c604 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Wed, 7 Aug 2024 17:37:11 +0800 Subject: [PATCH 373/374] LoongArch: Use accessors to page table entries instead of direct dereference commit 4574815abf43e2bf05643e1b3f7a2e5d6df894f0 upstream. As very well explained in commit 20a004e7b017cce282 ("arm64: mm: Use READ_ONCE/WRITE_ONCE when accessing page tables"), an architecture whose page table walker can modify the PTE in parallel must use READ_ONCE()/ WRITE_ONCE() macro to avoid any compiler transformation. So apply that to LoongArch which is such an architecture, in order to avoid potential problems. Similar to commit edf955647269422e ("riscv: Use accessors to page table entries instead of direct dereference"). 
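To make the pattern concrete, a hedged sketch of the accessor style being adopted; the actual LoongArch definitions, including the _PAGE_GLOBAL buddy handling, are in the diff below, and the helper name here is illustrative:

    /* Read and write page-table entries through single-copy-atomic accessors. */
    #define ptep_get(ptep)  READ_ONCE(*(ptep))
    #define pmdp_get(pmdp)  READ_ONCE(*(pmdp))

    static inline void set_pte_sketch(pte_t *ptep, pte_t pteval)
    {
            /* Prevents load/store tearing against a concurrent walker. */
            WRITE_ONCE(*ptep, pteval);
    }
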
Signed-off-by: Huacai Chen Signed-off-by: Greg Kroah-Hartman --- arch/loongarch/include/asm/hugetlb.h | 4 +-- arch/loongarch/include/asm/kfence.h | 6 ++-- arch/loongarch/include/asm/pgtable.h | 48 +++++++++++++++++----------- arch/loongarch/kvm/mmu.c | 8 ++--- arch/loongarch/mm/hugetlbpage.c | 6 ++-- arch/loongarch/mm/init.c | 10 +++--- arch/loongarch/mm/kasan_init.c | 10 +++--- arch/loongarch/mm/pgtable.c | 2 +- 8 files changed, 52 insertions(+), 42 deletions(-) diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index aa44b3fe43dd..5da32c00d483 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t clear; - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); pte_val(clear) = (unsigned long)invalid_pte_table; set_pte_at(mm, addr, ptep, clear); @@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t pte, int dirty) { - int changed = !pte_same(*ptep, pte); + int changed = !pte_same(ptep_get(ptep), pte); if (changed) { set_pte_at(vma->vm_mm, addr, ptep, pte); diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h index 92636e82957c..da9e93024626 100644 --- a/arch/loongarch/include/asm/kfence.h +++ b/arch/loongarch/include/asm/kfence.h @@ -53,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) { pte_t *pte = virt_to_kpte(addr); - if (WARN_ON(!pte) || pte_none(*pte)) + if (WARN_ON(!pte) || pte_none(ptep_get(pte))) return false; if (protect) - set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT))); else - set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT))); preempt_disable(); local_flush_tlb_one(addr); diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index af3acdf3481a..c4b1a7595c4e 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define KFENCE_AREA_START (VMEMMAP_END + 1) #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) +#define ptep_get(ptep) READ_ONCE(*(ptep)) +#define pmdp_get(pmdp) READ_ONCE(*(pmdp)) + #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) #ifndef __PAGETABLE_PMD_FOLDED @@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d) return p4d_val(p4d) != (unsigned long)invalid_pud_table; } -static inline void p4d_clear(p4d_t *p4dp) -{ - p4d_val(*p4dp) = (unsigned long)invalid_pud_table; -} - static inline pud_t *p4d_pgtable(p4d_t p4d) { return (pud_t *)p4d_val(p4d); @@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) { - *p4d = p4dval; + WRITE_ONCE(*p4d, p4dval); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table)); } #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) @@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud) return pud_val(pud) != (unsigned long)invalid_pmd_table; } -static inline void pud_clear(pud_t *pudp) +static inline pmd_t *pud_pgtable(pud_t pud) { - pud_val(*pudp) = ((unsigned long)invalid_pmd_table); + return 
(pmd_t *)pud_val(pud); } -static inline pmd_t *pud_pgtable(pud_t pud) +static inline void set_pud(pud_t *pud, pud_t pudval) { - return (pmd_t *)pud_val(pud); + WRITE_ONCE(*pud, pudval); } -#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) +static inline void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud((unsigned long)invalid_pmd_table)); +} #define pud_phys(pud) PHYSADDR(pud_val(pud)) #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) @@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd) return pmd_val(pmd) != (unsigned long)invalid_pte_table; } -static inline void pmd_clear(pmd_t *pmdp) +static inline void set_pmd(pmd_t *pmd, pmd_t pmdval) { - pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); + WRITE_ONCE(*pmd, pmdval); } -#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table)); +} #define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) @@ -314,7 +323,8 @@ extern void paging_init(void); static inline void set_pte(pte_t *ptep, pte_t pteval) { - *ptep = pteval; + WRITE_ONCE(*ptep, pteval); + if (pte_val(pteval) & _PAGE_GLOBAL) { pte_t *buddy = ptep_buddy(ptep); /* @@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [global] "r" (page_global)); #else /* !CONFIG_SMP */ - if (pte_none(*buddy)) - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; + if (pte_none(ptep_get(buddy))) + WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); #endif /* CONFIG_SMP */ } } @@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Preserve global status for the pair */ - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) + if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL) set_pte(ptep, __pte(_PAGE_GLOBAL)); else set_pte(ptep, __pte(0)); @@ -589,7 +599,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - pmd_t old = *pmdp; + pmd_t old = pmdp_get(pmdp); pmd_clear(pmdp); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 98883aa23ab8..357f775abd49 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -695,19 +695,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, * value) and then p*d_offset() walks into the target huge page instead * of the old page table (sees the new value). 
*/ - pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + pgd = pgdp_get(pgd_offset(kvm->mm, hva)); if (pgd_none(pgd)) goto out; - p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + p4d = p4dp_get(p4d_offset(&pgd, hva)); if (p4d_none(p4d) || !p4d_present(p4d)) goto out; - pud = READ_ONCE(*pud_offset(&p4d, hva)); + pud = pudp_get(pud_offset(&p4d, hva)); if (pud_none(pud) || !pud_present(pud)) goto out; - pmd = READ_ONCE(*pmd_offset(&pud, hva)); + pmd = pmdp_get(pmd_offset(&pud, hva)); if (pmd_none(pmd) || !pmd_present(pmd)) goto out; diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index 12222c56cb59..e4068906143b 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd_t *pmd = NULL; pgd = pgd_offset(mm, addr); - if (pgd_present(*pgd)) { + if (pgd_present(pgdp_get(pgd))) { p4d = p4d_offset(pgd, addr); - if (p4d_present(*p4d)) { + if (p4d_present(p4dp_get(p4d))) { pud = pud_offset(p4d, addr); - if (pud_present(*pud)) + if (pud_present(pudp_get(pud))) pmd = pmd_offset(pud, addr); } } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index bf789d114c2d..8a87a482c8f4 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -141,7 +141,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, unsigned long addr, unsigned long next) { - int huge = pmd_val(*pmd) & _PAGE_HUGE; + int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE; if (huge) vmemmap_verify((pte_t *)pmd, node, addr, next); @@ -173,7 +173,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pud_t *pud; pmd_t *pmd; - if (p4d_none(*p4d)) { + if (p4d_none(p4dp_get(p4d))) { pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pud) panic("%s: Failed to allocate memory\n", __func__); @@ -184,7 +184,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pud = pud_offset(p4d, addr); - if (pud_none(*pud)) { + if (pud_none(pudp_get(pud))) { pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pmd) panic("%s: Failed to allocate memory\n", __func__); @@ -195,7 +195,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) { + if (!pmd_present(pmdp_get(pmd))) { pte_t *pte; pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); @@ -216,7 +216,7 @@ void __init __set_fixmap(enum fixed_addresses idx, BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); ptep = populate_kernel_pte(addr); - if (!pte_none(*ptep)) { + if (!pte_none(ptep_get(ptep))) { pte_ERROR(*ptep); return; } diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index c608adc99845..427d6b1aec09 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -105,7 +105,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) { - if (__pmd_none(early, READ_ONCE(*pmdp))) { + if (__pmd_none(early, pmdp_get(pmdp))) { phys_addr_t pte_phys = early ? __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node); if (!early) @@ -118,7 +118,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) { - if (__pud_none(early, READ_ONCE(*pudp))) { + if (__pud_none(early, pudp_get(pudp))) { phys_addr_t pmd_phys = early ? 
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); if (!early) @@ -131,7 +131,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) { - if (__p4d_none(early, READ_ONCE(*p4dp))) { + if (__p4d_none(early, p4dp_get(p4dp))) { phys_addr_t pud_phys = early ? __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); if (!early) @@ -154,7 +154,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, : kasan_alloc_zeroed_page(node); next = addr + PAGE_SIZE; set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep))); + } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -166,7 +166,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, do { next = pmd_addr_end(addr, end); kasan_pte_populate(pmdp, addr, next, node, early); - } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp))); + } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp))); } static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index bda018150000..eb6a29b491a7 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -128,7 +128,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot) void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); flush_tlb_all(); } From 049be94099ea5ba8338526c5a4f4f404b9dcaf54 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 12 Sep 2024 11:13:13 +0200 Subject: [PATCH 374/374] Linux 6.10.10 Link: https://lore.kernel.org/r/20240910092622.245959861@linuxfoundation.org Tested-by: Salvatore Bonaccorso Tested-by: Florian Fainelli Tested-by: Mark Brown Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Christian Heusel Tested-by: Justin M. Forbes Tested-by: Pavel Machek (CIP) Tested-by: Ron Economos Tested-by: Kexy Biscuit Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5945cce6b066..9b4614c0fcbb 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 10 -SUBLEVEL = 9 +SUBLEVEL = 10 EXTRAVERSION = NAME = Baby Opossum Posse