return false;
 
        if (ALIGN(len, dma_get_cache_alignment()) > space) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.tx_spare_full++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, tx_spare_full);
                return false;
        }
 
                return false;
 
        if (space < HNS3_MAX_SGL_SIZE) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.tx_spare_full++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, tx_spare_full);
                return false;
        }
 
 
        ret = hns3_handle_vtags(ring, skb);
        if (unlikely(ret < 0)) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.tx_vlan_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, tx_vlan_err);
                return ret;
        } else if (ret == HNS3_INNER_VLAN_TAG) {
                inner_vtag = skb_vlan_tag_get(skb);
 
                ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
                if (unlikely(ret < 0)) {
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.tx_l4_proto_err++;
-                       u64_stats_update_end(&ring->syncp);
+                       hns3_ring_stats_update(ring, tx_l4_proto_err);
                        return ret;
                }
 
                                      &type_cs_vlan_tso,
                                      &ol_type_vlan_len_msec);
                if (unlikely(ret < 0)) {
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.tx_l2l3l4_err++;
-                       u64_stats_update_end(&ring->syncp);
+                       hns3_ring_stats_update(ring, tx_l2l3l4_err);
                        return ret;
                }
 
                ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
                                   &type_cs_vlan_tso, &desc_cb->send_bytes);
                if (unlikely(ret < 0)) {
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.tx_tso_err++;
-                       u64_stats_update_end(&ring->syncp);
+                       hns3_ring_stats_update(ring, tx_tso_err);
                        return ret;
                }
        }
        }
 
        if (unlikely(dma_mapping_error(dev, dma))) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.sw_err_cnt++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, sw_err_cnt);
                return -ENOMEM;
        }
 
         * recursion level of over HNS3_MAX_RECURSION_LEVEL.
         */
        if (bd_num == UINT_MAX) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.over_max_recursion++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, over_max_recursion);
                return -ENOMEM;
        }
 
         */
        if (skb->len > HNS3_MAX_TSO_SIZE ||
            (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.hw_limitation++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, hw_limitation);
                return -ENOMEM;
        }
 
        if (__skb_linearize(skb)) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.sw_err_cnt++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, sw_err_cnt);
                return -ENOMEM;
        }
 
 
                bd_num = hns3_tx_bd_count(skb->len);
 
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.tx_copy++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, tx_copy);
        }
 
 out:
                return bd_num;
        }
 
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.tx_busy++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, tx_busy);
 
        return -EBUSY;
 }
        ring->pending_buf += num;
 
        if (!doorbell) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.tx_more++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, tx_more);
                return;
        }
 
        ret = skb_copy_bits(skb, 0, buf, size);
        if (unlikely(ret < 0)) {
                hns3_tx_spare_rollback(ring, cb_len);
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.copy_bits_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, copy_bits_err);
                return ret;
        }
 
        dma_sync_single_for_device(ring_to_dev(ring), dma, size,
                                   DMA_TO_DEVICE);
 
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.tx_bounce++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, tx_bounce);
+
        return bd_num;
 }
 
        nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
        if (unlikely(nents < 0)) {
                hns3_tx_spare_rollback(ring, cb_len);
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.skb2sgl_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, skb2sgl_err);
                return -ENOMEM;
        }
 
                                DMA_TO_DEVICE);
        if (unlikely(!sgt->nents)) {
                hns3_tx_spare_rollback(ring, cb_len);
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.map_sg_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, map_sg_err);
                return -ENOMEM;
        }
 
        for (i = 0; i < sgt->nents; i++)
                bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
                                         sg_dma_len(sgt->sgl + i));
-
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.tx_sgl++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, tx_sgl);
 
        return bd_num;
 }
        if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
                hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
 
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.sw_err_cnt++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, sw_err_cnt);
 
                return NETDEV_TX_OK;
        }
        for (i = 0; i < cleand_count; i++) {
                desc_cb = &ring->desc_cb[ring->next_to_use];
                if (desc_cb->reuse_flag) {
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.reuse_pg_cnt++;
-                       u64_stats_update_end(&ring->syncp);
+                       hns3_ring_stats_update(ring, reuse_pg_cnt);
 
                        hns3_reuse_buffer(ring, ring->next_to_use);
                } else {
                        ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
                        if (ret) {
-                               u64_stats_update_begin(&ring->syncp);
-                               ring->stats.sw_err_cnt++;
-                               u64_stats_update_end(&ring->syncp);
+                               hns3_ring_stats_update(ring, sw_err_cnt);
 
                                hns3_rl_err(ring_to_netdev(ring),
                                            "alloc rx buffer failed: %d\n",
                        }
                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.non_reuse_pg++;
-                       u64_stats_update_end(&ring->syncp);
+                       hns3_ring_stats_update(ring, non_reuse_pg);
                }
 
                ring_ptr_move_fw(ring, next_to_use);
        void *frag = napi_alloc_frag(frag_size);
 
        if (unlikely(!frag)) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.frag_alloc_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, frag_alloc_err);
 
                hns3_rl_err(ring_to_netdev(ring),
                            "failed to allocate rx frag\n");
        skb_add_rx_frag(skb, i, virt_to_page(frag),
                        offset_in_page(frag), frag_size, frag_size);
 
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.frag_alloc++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, frag_alloc);
        return 0;
 }
 
            hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
                return false;
 
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.csum_complete++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, csum_complete);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = csum_unfold((__force __sum16)csum);
 
        if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
                                 BIT(HNS3_RXD_OL3E_B) |
                                 BIT(HNS3_RXD_OL4E_B)))) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.l3l4_csum_err++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, l3l4_csum_err);
 
                return;
        }
        skb = ring->skb;
        if (unlikely(!skb)) {
                hns3_rl_err(netdev, "alloc rx skb fail\n");
-
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.sw_err_cnt++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, sw_err_cnt);
 
                return -ENOMEM;
        }
        if (ring->page_pool)
                skb_mark_for_recycle(skb);
 
-       u64_stats_update_begin(&ring->syncp);
-       ring->stats.seg_pkt_cnt++;
-       u64_stats_update_end(&ring->syncp);
+       hns3_ring_stats_update(ring, seg_pkt_cnt);
 
        ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
        __skb_put(skb, ring->pull_len);
        ret = hns3_set_gro_and_checksum(ring, skb, l234info,
                                        bd_base_info, ol_info, csum);
        if (unlikely(ret)) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.rx_err_cnt++;
-               u64_stats_update_end(&ring->syncp);
+               hns3_ring_stats_update(ring, rx_err_cnt);
                return ret;
        }
 
                if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
                        ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
                        if (ret) {
-                               u64_stats_update_begin(&ring->syncp);
-                               ring->stats.sw_err_cnt++;
-                               u64_stats_update_end(&ring->syncp);
+                               hns3_ring_stats_update(ring, sw_err_cnt);
                                /* if alloc new buffer fail, exit directly
                                 * and reclear in up flow.
                                 */