        struct nvme_request *nr = nvme_req(req);
 
        if (ns) {
-               pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+               pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
                       ns->disk ? ns->disk->disk_name : "?",
                       nvme_get_opcode_str(nr->cmd->common.opcode),
                       nr->cmd->common.opcode,
-                      (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
-                      (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+                      nvme_sect_to_lba(ns, blk_rq_pos(req)),
+                      blk_rq_bytes(req) >> ns->head->lba_shift,
                       nvme_get_error_status_str(nr->status),
                       nr->status >> 8 & 7,     /* Status Code Type */
                       nr->status & 0xff,       /* Status Code */
 
        if (queue_max_discard_segments(req->q) == 1) {
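+               /* convert the 512-byte sector range to namespace LBAs */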
                u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
-               u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+               u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
 
                range[0].cattr = cpu_to_le32(0);
                range[0].nlb = cpu_to_le32(nlb);
        } else {
                __rq_for_each_bio(bio, req) {
                        u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-                       u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+                       u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
 
                        if (n < segments) {
                                range[n].cattr = cpu_to_le32(0);
        u64 ref48;
 
        /* both rw and write zeroes share the same reftag format */
-       switch (ns->guard_type) {
+       switch (ns->head->guard_type) {
        case NVME_NVM_NS_16B_GUARD:
                cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
                break;
        cmnd->write_zeroes.slba =
                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
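+       /* the NLB (length) field is 0's based, hence the "- 1" */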
        cmnd->write_zeroes.length =
-               cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+               cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
 
-       if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
+       if (!(req->cmd_flags & REQ_NOUNMAP) &&
+           (ns->head->features & NVME_NS_DEAC))
                cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
 
        if (nvme_ns_has_pi(ns)) {
                cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
 
-               switch (ns->pi_type) {
+               switch (ns->head->pi_type) {
                case NVME_NS_DPS_PI_TYPE1:
                case NVME_NS_DPS_PI_TYPE2:
                        nvme_set_ref_tag(ns, cmnd, req);
        cmnd->rw.cdw3 = 0;
        cmnd->rw.metadata = 0;
        cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
-       cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+       cmnd->rw.length =
+               cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
        cmnd->rw.reftag = 0;
        cmnd->rw.apptag = 0;
        cmnd->rw.appmask = 0;
 
-       if (ns->ms) {
+       if (ns->head->ms) {
                /*
                 * If formatted with metadata, the block layer always provides a
                 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
                        control |= NVME_RW_PRINFO_PRACT;
                }
 
-               switch (ns->pi_type) {
+               switch (ns->head->pi_type) {
                case NVME_NS_DPS_PI_TYPE3:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;
                        break;
 {
        struct blk_integrity integrity = { };
 
-       switch (ns->pi_type) {
+       switch (ns->head->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
-               switch (ns->guard_type) {
+               switch (ns->head->guard_type) {
                case NVME_NVM_NS_16B_GUARD:
                        integrity.profile = &t10_pi_type3_crc;
                        integrity.tag_size = sizeof(u16) + sizeof(u32);
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
-               switch (ns->guard_type) {
+               switch (ns->head->guard_type) {
                case NVME_NVM_NS_16B_GUARD:
                        integrity.profile = &t10_pi_type1_crc;
                        integrity.tag_size = sizeof(u16);
                break;
        }
 
-       integrity.tuple_size = ns->ms;
+       integrity.tuple_size = ns->head->ms;
        blk_integrity_register(disk, &integrity);
        blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
 }
        int ret = 0;
        u32 elbaf;
 
-       ns->pi_size = 0;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+       ns->head->pi_size = 0;
+       ns->head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
        if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
-               ns->pi_size = sizeof(struct t10_pi_tuple);
-               ns->guard_type = NVME_NVM_NS_16B_GUARD;
+               ns->head->pi_size = sizeof(struct t10_pi_tuple);
+               ns->head->guard_type = NVME_NVM_NS_16B_GUARD;
                goto set_pi;
        }
 
        if (nvme_elbaf_sts(elbaf))
                goto free_data;
 
-       ns->guard_type = nvme_elbaf_guard_type(elbaf);
-       switch (ns->guard_type) {
+       ns->head->guard_type = nvme_elbaf_guard_type(elbaf);
+       switch (ns->head->guard_type) {
        case NVME_NVM_NS_64B_GUARD:
-               ns->pi_size = sizeof(struct crc64_pi_tuple);
+               ns->head->pi_size = sizeof(struct crc64_pi_tuple);
                break;
        case NVME_NVM_NS_16B_GUARD:
-               ns->pi_size = sizeof(struct t10_pi_tuple);
+               ns->head->pi_size = sizeof(struct t10_pi_tuple);
                break;
        default:
                break;
 free_data:
        kfree(nvm);
 set_pi:
-       if (ns->pi_size && (first || ns->ms == ns->pi_size))
-               ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+       if (ns->head->pi_size && (first || ns->head->ms == ns->head->pi_size))
+               ns->head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
        else
-               ns->pi_type = 0;
+               ns->head->pi_type = 0;
 
        return ret;
 }
        if (ret)
                return ret;
 
-       ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
-       if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+       ns->head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+       if (!ns->head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
                return 0;
 
        if (ctrl->ops->flags & NVME_F_FABRICS) {
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                        return 0;
 
-               ns->features |= NVME_NS_EXT_LBAS;
+               ns->head->features |= NVME_NS_EXT_LBAS;
 
                /*
                 * The current fabrics transport drivers support namespace
                 * gain the ability to use other metadata formats.
                 */
                if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
-                       ns->features |= NVME_NS_METADATA_SUPPORTED;
+                       ns->head->features |= NVME_NS_METADATA_SUPPORTED;
        } else {
                /*
                 * For PCIe controllers, we can't easily remap the separate
                 * We allow extended LBAs for the passthrough interface, though.
                 */
                if (id->flbas & NVME_NS_FLBAS_META_EXT)
-                       ns->features |= NVME_NS_EXT_LBAS;
+                       ns->head->features |= NVME_NS_EXT_LBAS;
                else
-                       ns->features |= NVME_NS_METADATA_SUPPORTED;
+                       ns->head->features |= NVME_NS_METADATA_SUPPORTED;
        }
        return 0;
 }
                struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
-       u32 bs = 1U << ns->lba_shift;
+       u32 bs = 1U << ns->head->lba_shift;
        u32 atomic_bs, phys_bs, io_opt = 0;
 
        /*
         * The block layer can't support LBA sizes larger than the page size
         * yet, so catch this early and don't allow block I/O.
         */
-       if (ns->lba_shift > PAGE_SHIFT) {
+       if (ns->head->lba_shift > PAGE_SHIFT) {
                capacity = 0;
                bs = (1 << 9);
        }
         * I/O to namespaces with metadata except when the namespace supports
         * PI, as it can strip/insert in that case.
         */
-       if (ns->ms) {
+       if (ns->head->ms) {
                if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
-                   (ns->features & NVME_NS_METADATA_SUPPORTED))
+                   (ns->head->features & NVME_NS_METADATA_SUPPORTED))
                        nvme_init_integrity(disk, ns,
                                            ns->ctrl->max_integrity_segments);
                else if (!nvme_ns_has_pi(ns))
 
        blk_mq_freeze_queue(ns->disk->queue);
        lbaf = nvme_lbaf_index(id->flbas);
-       ns->lba_shift = id->lbaf[lbaf].ds;
+       ns->head->lba_shift = id->lbaf[lbaf].ds;
        nvme_set_queue_limits(ns->ctrl, ns->queue);
 
        ret = nvme_configure_metadata(ns, id);
         * do not return zeroes.
         */
        if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
-               ns->features |= NVME_NS_DEAC;
+               ns->head->features |= NVME_NS_DEAC;
        set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
        set_bit(NVME_NS_READY, &ns->flags);
        blk_mq_unfreeze_queue(ns->disk->queue);
 
 {
        struct request_queue *q = ns->queue;
 
-       blk_queue_chunk_sectors(q, ns->zsze);
+       blk_queue_chunk_sectors(q, ns->head->zsze);
        blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
 
        return blk_revalidate_disk_zones(ns->disk, NULL);
                goto free_data;
        }
 
-       ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
-       if (!is_power_of_2(ns->zsze)) {
+       ns->head->zsze =
+               nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+       if (!is_power_of_2(ns->head->zsze)) {
                dev_warn(ns->ctrl->device,
                        "invalid zone size:%llu for namespace:%u\n",
-                       ns->zsze, ns->head->ns_id);
+                       ns->head->zsze, ns->head->ns_id);
                status = -ENODEV;
                goto free_data;
        }
                                   sizeof(struct nvme_zone_descriptor);
 
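+       /* don't report more zones than the namespace capacity allows */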
        nr_zones = min_t(unsigned int, nr_zones,
-                        get_capacity(ns->disk) >> ilog2(ns->zsze));
+                        get_capacity(ns->disk) >> ilog2(ns->head->zsze));
 
        bufsize = sizeof(struct nvme_zone_report) +
                nr_zones * sizeof(struct nvme_zone_descriptor);
 
        zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
        zone.cond = entry->zs >> 4;
-       zone.len = ns->zsze;
+       zone.len = ns->head->zsze;
        zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
        zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
        if (zone.cond == BLK_ZONE_COND_FULL)
        c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
        c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
 
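+       /* zsze is a power of two; round the start down to a zone boundary */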
-       sector &= ~(ns->zsze - 1);
+       sector &= ~(ns->head->zsze - 1);
        while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
                memset(report, 0, buflen);
 
                        zone_idx++;
                }
 
-               sector += ns->zsze * nz;
+               sector += ns->head->zsze * nz;
        }
 
        if (zone_idx > 0)