RDMA/rxe: Add MASK suffix for RXE_READ_OR_ATOMIC and RXE_WRITE_OR_SEND
author		Xiao Yang <yangx.jy@fujitsu.com>
		Tue, 14 Sep 2021 08:02:52 +0000 (16:02 +0800)
committer	Jason Gunthorpe <jgg@nvidia.com>
		Tue, 28 Sep 2021 14:42:24 +0000 (11:42 -0300)
Add the MASK suffix to reflect the intention: each of these values ORs together several flag bits, so it is not just a single bit.

Link: https://lore.kernel.org/r/20210914080253.1145353-3-yangx.jy@fujitsu.com
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/rxe/rxe_opcode.h
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
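
Not part of the patch: a minimal user-space sketch of the idea behind the rename. The bit positions below are made up for illustration (the real values in rxe_opcode.h are offsets from NUM_HDR_TYPES); the point is that the renamed values combine several single-bit flags, so a test against them matches more than one opcode class.

	/* standalone illustration only, not rxe kernel code */
	#include <stdio.h>

	#define BIT(n)	(1u << (n))

	enum rxe_hdr_mask_sketch {
		RXE_READ_MASK		= BIT(0),
		RXE_WRITE_MASK		= BIT(1),
		RXE_ATOMIC_MASK		= BIT(2),
		RXE_SEND_MASK		= BIT(3),

		/* multi-bit combinations: hence the _MASK suffix */
		RXE_READ_OR_ATOMIC_MASK	= (RXE_READ_MASK | RXE_ATOMIC_MASK),
		RXE_WRITE_OR_SEND_MASK	= (RXE_WRITE_MASK | RXE_SEND_MASK),
	};

	int main(void)
	{
		unsigned int pkt_mask = RXE_ATOMIC_MASK;	/* e.g. an atomic op */

		/* matches: the combined mask covers both the read and atomic bits */
		if (pkt_mask & RXE_READ_OR_ATOMIC_MASK)
			printf("read-or-atomic path taken\n");

		/* does not match for this packet */
		if (pkt_mask & RXE_WRITE_OR_SEND_MASK)
			printf("write-or-send path taken\n");

		return 0;
	}

This mirrors how the renamed masks are used in the hunks below, e.g. (pkt->mask & RXE_READ_OR_ATOMIC_MASK) in check_resource().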

index bbeccb1dcec76201746b73c60dc5cf5c774c7d8e..e3a46b287c154a2ac1707399b900f7f78e7ffa6c 100644 (file)
@@ -82,8 +82,8 @@ enum rxe_hdr_mask {
 
        RXE_LOOPBACK_MASK       = BIT(NUM_HDR_TYPES + 12),
 
-       RXE_READ_OR_ATOMIC      = (RXE_READ_MASK | RXE_ATOMIC_MASK),
-       RXE_WRITE_OR_SEND       = (RXE_WRITE_MASK | RXE_SEND_MASK),
+       RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+       RXE_WRITE_OR_SEND_MASK  = (RXE_WRITE_MASK | RXE_SEND_MASK),
        RXE_READ_OR_WRITE_MASK  = (RXE_READ_MASK | RXE_WRITE_MASK),
 };
 
index 2981b3ef3cc0551b03b483ae293e735dc513a796..fe275fcaffbd919ef282d2197497509c6ba01369 100644 (file)
@@ -461,7 +461,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
        if (err)
                return err;
 
-       if (pkt->mask & RXE_WRITE_OR_SEND) {
+       if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
 
@@ -678,13 +678,13 @@ next_wqe:
        }
 
        mask = rxe_opcode[opcode].mask;
-       if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
+       if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }
 
        mtu = get_mtu(qp);
-       payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
+       payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
index ea7d3ee016d9d5a0e517b1518ac0f3a0cd2e9089..4af0dc95784a811cce142b2a99beb437db46f3c7 100644 (file)
@@ -362,7 +362,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
                }
        }
 
-       if (pkt->mask & RXE_READ_OR_ATOMIC) {
+       if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
                /* it is the requesters job to not send
                 * too many read/atomic ops, we just
                 * recycle the responder resource queue