cma_id_put(id_priv->id.context);
 
        kfree(id_priv->id.route.path_rec);
+       kfree(id_priv->id.route.path_rec_inbound);
+       kfree(id_priv->id.route.path_rec_outbound);
 
        put_net(id_priv->id.route.addr.dev_addr.net);
        kfree(id_priv);
 }
 EXPORT_SYMBOL(rdma_set_min_rnr_timer);
 
+/* Cache the inbound pathrecord on the route, allocating its storage on
+ * first use.  An allocation failure silently drops the record (best
+ * effort; the primary path is handled separately).
+ */
+static void route_set_path_rec_inbound(struct cma_work *work,
+                                      struct sa_path_rec *path_rec)
+{
+       struct rdma_route *route = &work->id->id.route;
+       struct sa_path_rec *rec = route->path_rec_inbound;
+
+       if (!rec) {
+               rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+               if (!rec)
+                       return;
+               route->path_rec_inbound = rec;
+       }
+
+       *rec = *path_rec;
+}
+
+/* Cache the outbound pathrecord on the route, allocating its storage on
+ * first use.  An allocation failure silently drops the record (best
+ * effort; the primary path is handled separately).
+ */
+static void route_set_path_rec_outbound(struct cma_work *work,
+                                       struct sa_path_rec *path_rec)
+{
+       struct rdma_route *route = &work->id->id.route;
+       struct sa_path_rec *rec = route->path_rec_outbound;
+
+       if (!rec) {
+               rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+               if (!rec)
+                       return;
+               route->path_rec_outbound = rec;
+       }
+
+       *rec = *path_rec;
+}
+
+/* Completion handler for an SA pathrecord query: file the returned
+ * record(s) on the rdma_route and queue the CM work item, or report
+ * RDMA_CM_EVENT_ROUTE_ERROR on failure.
+ */
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
-                             void *context)
+                             int num_prs, void *context)
 {
        struct cma_work *work = context;
        struct rdma_route *route;
+       int i;
 
        route = &work->id->id.route;
 
-       if (!status) {
-               route->num_pri_alt_paths = 1;
-               *route->path_rec = *path_rec;
-       } else {
-               work->old_state = RDMA_CM_ROUTE_QUERY;
-               work->new_state = RDMA_CM_ADDR_RESOLVED;
-               work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
-               work->event.status = status;
-               pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
-                                    status);
+       if (status)
+               goto fail;
+
+       /* Dispatch each record by its flags: no flags or IB_PATH_GMP means
+        * the primary (bidirectional) path; inbound/outbound records are
+        * cached separately and are best effort on allocation failure.
+        */
+       for (i = 0; i < num_prs; i++) {
+               if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
+                       *route->path_rec = path_rec[i];
+               else if (path_rec[i].flags & IB_PATH_INBOUND)
+                       route_set_path_rec_inbound(work, &path_rec[i]);
+               else if (path_rec[i].flags & IB_PATH_OUTBOUND)
+                       route_set_path_rec_outbound(work, &path_rec[i]);
+       }
+       /* A usable route requires the primary pathrecord buffer. */
+       if (!route->path_rec) {
+               status = -EINVAL;
+               goto fail;
        }
 
+       route->num_pri_alt_paths = 1;
+       queue_work(cma_wq, &work->work);
+       return;
+
+fail:
+       work->old_state = RDMA_CM_ROUTE_QUERY;
+       work->new_state = RDMA_CM_ADDR_RESOLVED;
+       work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+       work->event.status = status;
+       pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
+                            status);
        queue_work(cma_wq, &work->work);
 }
 
 
 #include <rdma/ib_marshall.h>
 #include <rdma/ib_addr.h>
 #include <rdma/opa_addr.h>
+#include <rdma/rdma_cm.h>
 #include "sa.h"
 #include "core_priv.h"
 
 };
 
 struct ib_sa_query {
-       void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
+       void (*callback)(struct ib_sa_query *sa_query, int status,
+                        int num_prs, struct ib_sa_mad *mad);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client    *client;
        struct ib_sa_port      *port;
        u32                     seq; /* Local svc request sequence number */
        unsigned long           timeout; /* Local svc timeout */
        u8                      path_use; /* How will the pathrecord be used */
+
+       /* A separate buffer to save the pathrecords of a response; in cases
+        * like IB/netlink, multiple pathrecords are supported and mad->data
+        * is not large enough to hold them.
+        */
+       void                    *resp_pr_data;
 };
 
 #define IB_SA_ENABLE_LOCAL_SERVICE     0x00000001
 #define IB_SA_QUERY_OPA                        0x00000004
 
 struct ib_sa_path_query {
-       void (*callback)(int, struct sa_path_rec *, void *);
+       void (*callback)(int status, struct sa_path_rec *rec,
+                        int num_paths, void *context);
        void *context;
        struct ib_sa_query sa_query;
        struct sa_path_rec *conv_pr;
 
        if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
            sa_rec->reversible != 0)
-               query->path_use = LS_RESOLVE_PATH_USE_GMP;
+               query->path_use = LS_RESOLVE_PATH_USE_ALL;
        else
                query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
        header->path_use = query->path_use;
 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
                                           const struct nlmsghdr *nlh)
 {
+       struct ib_path_rec_data *srec, *drec;
+       struct ib_sa_path_query *path_query;
        struct ib_mad_send_wc mad_send_wc;
-       struct ib_sa_mad *mad = NULL;
        const struct nlattr *head, *curr;
-       struct ib_path_rec_data  *rec;
-       int len, rem;
+       struct ib_sa_mad *mad = NULL;
+       int len, rem, num_prs = 0;
        u32 mask = 0;
        int status = -EIO;
 
-       if (query->callback) {
-               head = (const struct nlattr *) nlmsg_data(nlh);
-               len = nlmsg_len(nlh);
-               switch (query->path_use) {
-               case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
-                       mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
-                       break;
+       if (!query->callback)
+               goto out;
 
-               case LS_RESOLVE_PATH_USE_ALL:
-               case LS_RESOLVE_PATH_USE_GMP:
-               default:
-                       mask = IB_PATH_PRIMARY | IB_PATH_GMP |
-                               IB_PATH_BIDIRECTIONAL;
-                       break;
+       path_query = container_of(query, struct ib_sa_path_query, sa_query);
+       mad = query->mad_buf->mad;
+       if (!path_query->conv_pr &&
+           (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
+               /* Need a larger buffer for possible multiple PRs */
+               query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
+                                              sizeof(*drec), GFP_KERNEL);
+               if (!query->resp_pr_data) {
+                       query->callback(query, -ENOMEM, 0, NULL);
+                       return;
                }
-               nla_for_each_attr(curr, head, len, rem) {
-                       if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
-                               rec = nla_data(curr);
-                               /*
-                                * Get the first one. In the future, we may
-                                * need to get up to 6 pathrecords.
-                                */
-                               if ((rec->flags & mask) == mask) {
-                                       mad = query->mad_buf->mad;
-                                       mad->mad_hdr.method |=
-                                               IB_MGMT_METHOD_RESP;
-                                       memcpy(mad->data, rec->path_rec,
-                                              sizeof(rec->path_rec));
-                                       status = 0;
-                                       break;
-                               }
-                       }
+       }
+
+       head = (const struct nlattr *) nlmsg_data(nlh);
+       len = nlmsg_len(nlh);
+       switch (query->path_use) {
+       case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+               mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+               break;
+
+       case LS_RESOLVE_PATH_USE_ALL:
+               mask = IB_PATH_PRIMARY;
+               break;
+
+       case LS_RESOLVE_PATH_USE_GMP:
+       default:
+               mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+                       IB_PATH_BIDIRECTIONAL;
+               break;
+       }
+
+       drec = (struct ib_path_rec_data *)query->resp_pr_data;
+       nla_for_each_attr(curr, head, len, rem) {
+               if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
+                       continue;
+
+               srec = nla_data(curr);
+               if ((srec->flags & mask) != mask)
+                       continue;
+
+               status = 0;
+               if (!drec) {
+                       memcpy(mad->data, srec->path_rec,
+                              sizeof(srec->path_rec));
+                       num_prs = 1;
+                       break;
                }
-               query->callback(query, status, mad);
+
+               memcpy(drec, srec, sizeof(*drec));
+               drec++;
+               num_prs++;
+               if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
+                       break;
        }
 
+       if (!status)
+               mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+
+       query->callback(query, status, num_prs, mad);
+       kvfree(query->resp_pr_data);
+       query->resp_pr_data = NULL;
+
+out:
        mad_send_wc.send_buf = query->mad_buf;
        mad_send_wc.status = IB_WC_SUCCESS;
        send_handler(query->mad_buf->mad_agent, &mad_send_wc);
                return PR_IB_SUPPORTED;
 }
 
+/* Unpack the single IB pathrecord carried in mad->data and deliver it to
+ * the user callback, converting to OPA format first when the query asked
+ * for a converted record (conv_pr set).
+ */
+static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
+                                    int status, struct ib_sa_mad *mad)
+{
+       struct sa_path_rec ib_rec = {};
+       struct sa_path_rec opa_rec = {};
+
+       ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+                 mad->data, &ib_rec);
+       ib_rec.rec_type = SA_PATH_REC_TYPE_IB;
+       sa_path_set_dmac_zero(&ib_rec);
+
+       if (query->conv_pr) {
+               sa_convert_path_ib_to_opa(&opa_rec, &ib_rec);
+               query->callback(status, &opa_rec, 1, query->context);
+       } else {
+               query->callback(status, &ib_rec, 1, query->context);
+       }
+}
+
+/**
+ * ib_sa_pr_callback_multiple() - Parse multiple path records, then invoke
+ * the user callback.
+ *
+ * In the multiple-PR case the records are saved in "query->resp_pr_data"
+ * (instead of "mad->data"), in "ib_path_rec_data" structure format, so
+ * that rec->flags can indicate the type of each PR.  This is valid only
+ * in IB fabric.
+ */
+static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
+                                      int status, int num_prs,
+                                      struct ib_path_rec_data *rec_data)
+{
+       struct sa_path_rec *prs;
+       int i;
+
+       prs = kvcalloc(num_prs, sizeof(*prs), GFP_KERNEL);
+       if (!prs) {
+               query->callback(-ENOMEM, NULL, 0, query->context);
+               return;
+       }
+
+       for (i = 0; i < num_prs; i++) {
+               ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+                         rec_data[i].path_rec, &prs[i]);
+               prs[i].rec_type = SA_PATH_REC_TYPE_IB;
+               sa_path_set_dmac_zero(&prs[i]);
+               prs[i].flags = rec_data[i].flags;
+       }
+
+       query->callback(status, prs, num_prs, query->context);
+       kvfree(prs);
+}
+
+/* Top-level pathrecord completion: reports a NULL record on error or an
+ * empty response, and otherwise dispatches to the OPA, single-IB-PR or
+ * multiple-IB-PR unpack paths.
+ */
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
-                                   int status,
+                                   int status, int num_prs,
                                    struct ib_sa_mad *mad)
 {
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);
+       struct sa_path_rec rec;
 
-       if (mad) {
-               struct sa_path_rec rec;
-
-               if (sa_query->flags & IB_SA_QUERY_OPA) {
-                       ib_unpack(opa_path_rec_table,
-                                 ARRAY_SIZE(opa_path_rec_table),
-                                 mad->data, &rec);
-                       rec.rec_type = SA_PATH_REC_TYPE_OPA;
-                       query->callback(status, &rec, query->context);
-               } else {
-                       ib_unpack(path_rec_table,
-                                 ARRAY_SIZE(path_rec_table),
-                                 mad->data, &rec);
-                       rec.rec_type = SA_PATH_REC_TYPE_IB;
-                       sa_path_set_dmac_zero(&rec);
-
-                       if (query->conv_pr) {
-                               struct sa_path_rec opa;
+       /* No MAD or no records: hand the status through with no record. */
+       if (!mad || !num_prs) {
+               query->callback(status, NULL, 0, query->context);
+               return;
+       }
 
-                               memset(&opa, 0, sizeof(struct sa_path_rec));
-                               sa_convert_path_ib_to_opa(&opa, &rec);
-                               query->callback(status, &opa, query->context);
-                       } else {
-                               query->callback(status, &rec, query->context);
-                       }
+       if (sa_query->flags & IB_SA_QUERY_OPA) {
+               /* An OPA response is rejected unless it carries exactly one
+                * record.
+                */
+               if (num_prs != 1) {
+                       query->callback(-EINVAL, NULL, 0, query->context);
+                       return;
                }
-       } else
-               query->callback(status, NULL, query->context);
+
+               ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+                         mad->data, &rec);
+               rec.rec_type = SA_PATH_REC_TYPE_OPA;
+               query->callback(status, &rec, num_prs, query->context);
+       } else {
+               /* resp_pr_data set means a netlink multi-PR response. */
+               if (!sa_query->resp_pr_data)
+                       ib_sa_pr_callback_single(query, status, mad);
+               else
+                       ib_sa_pr_callback_multiple(query, status, num_prs,
+                                                  sa_query->resp_pr_data);
+       }
 }
 
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
                       unsigned long timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct sa_path_rec *resp,
-                                       void *context),
+                                       int num_paths, void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
 {
 EXPORT_SYMBOL(ib_sa_path_rec_get);
 
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
-                                       int status,
+                                       int status, int num_prs,
                                        struct ib_sa_mad *mad)
 {
        struct ib_sa_mcmember_query *query =
 
 /* Support GuidInfoRecord */
 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
-                                       int status,
+                                       int status, int num_paths,
                                        struct ib_sa_mad *mad)
 {
        struct ib_sa_guidinfo_query *query =
 }
 
 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
-                                             int status,
+                                             int status, int num_prs,
                                              struct ib_sa_mad *mad)
 {
        unsigned long flags;
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
-                       query->callback(query, -ETIMEDOUT, NULL);
+                       query->callback(query, -ETIMEDOUT, 0, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
-                       query->callback(query, -EINTR, NULL);
+                       query->callback(query, -EINTR, 0, NULL);
                        break;
                default:
-                       query->callback(query, -EIO, NULL);
+                       query->callback(query, -EIO, 0, NULL);
                        break;
                }
 
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-                                       -EINVAL : 0,
+                                       -EINVAL : 0, 1,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
-                       query->callback(query, -EIO, NULL);
+                       query->callback(query, -EIO, 0, NULL);
        }
 
        ib_free_recv_mad(mad_recv_wc);