Simple overlapping changes in stmmac driver.
Adjust the skb_gro_flush_final_remcsum() function signature to match
the GRO list changes in net-next, as per Stephen Rothwell's example
merge resolution.
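
Concretely: 'net' introduced the helper still taking the old GRO chain
pointer, while net-next's list-based GRO passes the struct sk_buff *
returned by the inner ->gro_receive() handler (an ERR_PTR(-EINPROGRESS)
when the packet was consumed). A sketch of the resolved prototype, with
the pre-merge parameter noted in an editorial comment:

void skb_gro_flush_final_remcsum(struct sk_buff *skb,
                                 struct sk_buff *pp, /* was: struct sk_buff **pp */
                                 int flush,
                                 struct gro_remcsum *grc);
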
Signed-off-by: David S. Miller <davem@davemloft.net>
        skbh = (struct sk_buff **)phys_to_virt(addr);
        skb = *skbh;
  
 +      if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 +              memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 +
 +              if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
 +                                        &ns)) {
 +                      shhwtstamps.hwtstamp = ns_to_ktime(ns);
 +                      skb_tstamp_tx(skb, &shhwtstamps);
 +              } else {
 +                      dev_warn(dev, "fman_port_get_tstamp failed!\n");
 +              }
 +      }
 +
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
  
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
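
For reference, the transmit-side half implied by the confirmation path
above: the driver marks the skb before queueing it to hardware, then
reports the completed timestamp just as the hunk does with
fman_port_get_tstamp(). A minimal sketch using the generic kernel
timestamping API (both function names here are hypothetical):

#include <linux/ktime.h>
#include <linux/skbuff.h>

/* Hypothetical: request a HW timestamp before handing @skb to HW. */
static void example_tx_tstamp_request(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

/* Hypothetical: report the timestamp once the confirmation arrives. */
static void example_tx_tstamp_report(struct sk_buff *skb, u64 ns)
{
        struct skb_shared_hwtstamps shhwtstamps = {
                .hwtstamp = ns_to_ktime(ns),
        };

        skb_tstamp_tx(skb, &shhwtstamps);
}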
 
        }
  }
  
 +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
 +{
 +      u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
 +
 +      mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
 +      if (qmode != MTL_QUEUE_AVB)
 +              mtl_tx_op |= MTL_OP_MODE_TXQEN;
 +      else
 +              mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
 +
 +      writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
 +}
 +
+ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+ {
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ 
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+ 
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ }
+ 
  const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
 +      .qmode = dwmac4_qmode,
+       .set_bfsize = dwmac4_set_bfsize,
  };
  
  const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
 +      .qmode = dwmac4_qmode,
+       .set_bfsize = dwmac4_set_bfsize,
  };
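
Taken together, the two helpers let a caller flip a queue between AVB
and DCB mode and size the receive buffers per channel. A hypothetical
direct invocation (values illustrative; in-tree these are only reached
through the dma ops below):

static void example_configure_chan(void __iomem *ioaddr)
{
        /* Put queue/channel 1 into AVB mode with 4 KiB RX buffers. */
        dwmac4_qmode(ioaddr, 1, MTL_QUEUE_AVB);
        dwmac4_set_bfsize(ioaddr, 4096, 1);
}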
 
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
 +      void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
  };
  
  #define stmmac_set_tx_tail_ptr(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
  #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
 +#define stmmac_dma_qmode(__priv, __args...) \
 +      stmmac_do_void_callback(__priv, dma, qmode, __args)
+ #define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
  
  struct mac_device_info;
  struct net_device;
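
With the ops and wrappers wired up, the core never calls the dwmac4
functions directly. A hypothetical call site in the stmmac core (the
surrounding function is illustrative; argument order follows the
prototypes above):

static void example_setup_chan(struct stmmac_priv *priv, u32 chan,
                               u8 qmode, int bfsize)
{
        /* Dispatches to ->qmode() if the dma ops provide it. */
        stmmac_dma_qmode(priv, priv->ioaddr, chan, qmode);

        /* Dispatches to ->set_bfsize(); note bfsize before chan. */
        stmmac_set_dma_bfsize(priv, priv->ioaddr, bfsize, chan);
}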
 
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
  }
+ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
 -                                             struct sk_buff **pp,
++                                             struct sk_buff *pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+ {
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+ }
  #else
 -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
  {
        NAPI_GRO_CB(skb)->flush |= flush;
  }
+ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
 -                                             struct sk_buff **pp,
++                                             struct sk_buff *pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+ {
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+ }
  #endif
  
  static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
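
For context, the shape of a post-conversion caller: an encapsulation
->gro_receive() handler returns whatever the inner handler produced and
lets the helper undo any remote-checksum fixup when the packet is not
held for merging. A hypothetical skeleton (header parsing elided):

static struct sk_buff *example_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct gro_remcsum grc;
        int flush = 1;

        skb_gro_remcsum_init(&grc);

        /* ... parse the outer header; on success invoke the inner
         * protocol's ->gro_receive(), store its result in pp and
         * clear flush ...
         */

        skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
        return pp;
}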
 
        spinlock_t              acurs_lock;     /* protect cursors */
  #endif
        struct work_struct      close_work;     /* peer sent some closing */
 +      struct tasklet_struct   rx_tsklet;      /* Receiver tasklet for SMC-D */
 +      u8                      rx_off;         /* receive offset:
 +                                               * 0 for SMC-R, 32 for SMC-D
 +                                               */
 +      u64                     peer_token;     /* SMC-D token of peer */
  };
  
+ struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
+ };
+ 
  struct smc_sock {                             /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
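
The new rx_tsklet field implies per-connection setup on the SMC-D path;
a hypothetical initialization sketch in the net/smc context (handler
and init function names are illustrative, containing struct assumed to
be smc_connection, using the classic tasklet API of this era):

static void example_smcd_rx_handler(unsigned long data)
{
        struct smc_connection *conn = (struct smc_connection *)data;
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

        /* ... consume data starting at conn->rx_off within the
         * receive buffer, then wake any blocked reader ...
         */
        smc->sk.sk_data_ready(&smc->sk);
}

static void example_init_rx_tasklet(struct smc_connection *conn)
{
        conn->rx_off = 32;      /* per the comment above: 32 for SMC-D */
        tasklet_init(&conn->rx_tsklet, example_smcd_rx_handler,
                     (unsigned long)conn);
}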