From: Yang Yingliang <yangyingliang@huawei.com>
Date: Thu, 19 May 2022 03:21:08 +0000 (+0800)
Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1;p=linux.git

net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()

t7xx_cldma_alloc_and_map_skb() is sometimes called with a spin lock
held, where a GFP_KERNEL allocation may sleep and is therefore not
allowed. Add a 'gfp_mask' parameter to t7xx_cldma_alloc_and_map_skb()
so each caller can pass the appropriate flag: GFP_KERNEL in process
context, and GFP_ATOMIC under the lock in t7xx_cldma_clear_rxq().

Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
Reported-by: Hulk Robot
Signed-off-by: Yang Yingliang
Reviewed-by: Loic Poulain
Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
Signed-off-by: Jakub Kicinski
---

diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 0c52801ed0dea..6ff30cb8eb16f 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -91,9 +91,9 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
 }
 
 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-					 size_t size)
+					 size_t size, gfp_t gfp_mask)
 {
-	req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+	req->skb = __dev_alloc_skb(size, gfp_mask);
 	if (!req->skb)
 		return -ENOMEM;
 
@@ -174,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
 	spin_unlock_irqrestore(&queue->ring_lock, flags);
 
 	req = queue->rx_refill;
-	ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+	ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
 	if (ret)
 		return ret;
 
@@ -402,7 +402,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
 	if (!req->gpd)
 		goto err_free_req;
 
-	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
 	if (val)
 		goto err_free_pool;
 
@@ -801,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
 		if (req->skb)
 			continue;
 
-		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
 		if (ret)
 			break;
 
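
The change is the standard fix for the sleeping-in-atomic-context bug: GFP_KERNEL
allocations may sleep, which is forbidden while a spin lock is held, so
lock-holding callers must use GFP_ATOMIC. Below is a minimal, self-contained
sketch of the calling convention the patch adopts; it is an illustration only,
and demo_alloc_skb(), demo_refill() and demo_refill_locked() are invented
names, not functions from the t7xx driver.

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Allocation helper that lets the caller choose the GFP flags,
	 * mirroring the 'gfp_mask' parameter this patch adds.
	 */
	static int demo_alloc_skb(struct sk_buff **slot, size_t size, gfp_t gfp_mask)
	{
		*slot = __dev_alloc_skb(size, gfp_mask);
		if (!*slot)
			return -ENOMEM;
		return 0;
	}

	/* Process context: sleeping is allowed, so GFP_KERNEL is fine. */
	static int demo_refill(struct sk_buff **slot, size_t size)
	{
		return demo_alloc_skb(slot, size, GFP_KERNEL);
	}

	/* Atomic context: the spin lock forbids sleeping, so the
	 * allocation must come from the GFP_ATOMIC reserves.
	 */
	static int demo_refill_locked(spinlock_t *lock, struct sk_buff **slot,
				      size_t size)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(lock, flags);
		ret = demo_alloc_skb(slot, size, GFP_ATOMIC);
		spin_unlock_irqrestore(lock, flags);
		return ret;
	}

Threading gfp_t down from the caller, as the patch does, keeps the reliable
GFP_KERNEL path for process-context refills and limits use of the scarcer
atomic reserves to the one caller that actually holds the ring lock.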