2  * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 
   3  * was acquired by Western Digital in 2012.
 
   5  * Copyright 2012 sTec, Inc.
 
   6  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 
   8  * This file is part of the Linux kernel, and is made available under
 
   9  * the terms of the GNU General Public License version 2.
 
  12 #include <linux/kernel.h>
 
  13 #include <linux/module.h>
 
  14 #include <linux/init.h>
 
  15 #include <linux/pci.h>
 
  16 #include <linux/slab.h>
 
  17 #include <linux/spinlock.h>
 
  18 #include <linux/blkdev.h>
 
  19 #include <linux/sched.h>
 
  20 #include <linux/interrupt.h>
 
  21 #include <linux/compiler.h>
 
  22 #include <linux/workqueue.h>
 
  23 #include <linux/delay.h>
 
  24 #include <linux/time.h>
 
  25 #include <linux/hdreg.h>
 
  26 #include <linux/dma-mapping.h>
 
  27 #include <linux/completion.h>
 
  28 #include <linux/scatterlist.h>
 
  29 #include <linux/version.h>
 
  30 #include <linux/err.h>
 
  31 #include <linux/aer.h>
 
  32 #include <linux/wait.h>
 
  33 #include <linux/uio.h>
 
  34 #include <linux/stringify.h>
 
  35 #include <scsi/scsi.h>
 
  38 #include <linux/uaccess.h>
 
  39 #include <asm/unaligned.h>
 
  41 #include "skd_s1120.h"
 
  43 static int skd_dbg_level;
 
  44 static int skd_isr_comp_limit = 4;
 
  50         STEC_LINK_UNKNOWN = 0xFF
 
  54         SKD_FLUSH_INITIALIZER,
 
  55         SKD_FLUSH_ZERO_SIZE_FIRST,
 
  56         SKD_FLUSH_DATA_SECOND,
 
  59 #define SKD_ASSERT(expr) \
 
  61                 if (unlikely(!(expr))) { \
 
  62                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
 
  63                                # expr, __FILE__, __func__, __LINE__); \
 
  67 #define DRV_NAME "skd"
 
  68 #define DRV_VERSION "2.2.1"
 
  69 #define DRV_BUILD_ID "0260"
 
  70 #define PFX DRV_NAME ": "
 
  71 #define DRV_BIN_VERSION 0x100
 
  72 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
 
  74 MODULE_LICENSE("GPL");
 
  76 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
 
  77 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
 
  79 #define PCI_VENDOR_ID_STEC      0x1B39
 
  80 #define PCI_DEVICE_ID_S1120     0x0001
 
  82 #define SKD_FUA_NV              (1 << 1)
 
  83 #define SKD_MINORS_PER_DEVICE   16
 
  85 #define SKD_MAX_QUEUE_DEPTH     200u
 
  87 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
 
  89 #define SKD_N_FITMSG_BYTES      (512u)
 
  90 #define SKD_MAX_REQ_PER_MSG     14
 
  92 #define SKD_N_SPECIAL_CONTEXT   32u
 
  93 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
 
  95 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
 
  96  * 128KB limit.  That allows 4096*4K = 16M xfer size
 
  98 #define SKD_N_SG_PER_REQ_DEFAULT 256u
 
  99 #define SKD_N_SG_PER_SPECIAL    256u
 
 101 #define SKD_N_COMPLETION_ENTRY  256u
 
 102 #define SKD_N_READ_CAP_BYTES    (8u)
 
 104 #define SKD_N_INTERNAL_BYTES    (512u)
 
 106 #define SKD_SKCOMP_SIZE                                                 \
 
 107         ((sizeof(struct fit_completion_entry_v1) +                      \
 
 108           sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
 
 110 /* 5 bits of uniqifier, 0xF800 */
 
 111 #define SKD_ID_INCR             (0x400)
 
 112 #define SKD_ID_TABLE_MASK       (3u << 8u)
 
 113 #define  SKD_ID_RW_REQUEST      (0u << 8u)
 
 114 #define  SKD_ID_INTERNAL        (1u << 8u)
 
 115 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
 
 116 #define  SKD_ID_FIT_MSG         (3u << 8u)
 
 117 #define SKD_ID_SLOT_MASK        0x00FFu
 
 118 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
 
 120 #define SKD_N_TIMEOUT_SLOT      4u
 
 121 #define SKD_TIMEOUT_SLOT_MASK   3u
 
 123 #define SKD_N_MAX_SECTORS 2048u
 
 125 #define SKD_MAX_RETRIES 2u
 
 127 #define SKD_TIMER_SECONDS(seconds) (seconds)
 
 128 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
 
 130 #define INQ_STD_NBYTES 36
 
 132 enum skd_drvr_state {
 
 136         SKD_DRVR_STATE_STARTING,
 
 137         SKD_DRVR_STATE_ONLINE,
 
 138         SKD_DRVR_STATE_PAUSING,
 
 139         SKD_DRVR_STATE_PAUSED,
 
 140         SKD_DRVR_STATE_DRAINING_TIMEOUT,
 
 141         SKD_DRVR_STATE_RESTARTING,
 
 142         SKD_DRVR_STATE_RESUMING,
 
 143         SKD_DRVR_STATE_STOPPING,
 
 144         SKD_DRVR_STATE_FAULT,
 
 145         SKD_DRVR_STATE_DISAPPEARED,
 
 146         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
 
 147         SKD_DRVR_STATE_BUSY_ERASE,
 
 148         SKD_DRVR_STATE_BUSY_SANITIZE,
 
 149         SKD_DRVR_STATE_BUSY_IMMINENT,
 
 150         SKD_DRVR_STATE_WAIT_BOOT,
 
 151         SKD_DRVR_STATE_SYNCING,
 
 154 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
 
 155 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
 
 156 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
 
 157 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
 
 158 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
 
 159 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
 
 160 #define SKD_START_WAIT_SECONDS  90u
 
 166         SKD_REQ_STATE_COMPLETED,
 
 167         SKD_REQ_STATE_TIMEOUT,
 
 168         SKD_REQ_STATE_ABORTED,
 
 171 enum skd_fit_msg_state {
 
 176 enum skd_check_status_action {
 
 177         SKD_CHECK_STATUS_REPORT_GOOD,
 
 178         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
 
 179         SKD_CHECK_STATUS_REQUEUE_REQUEST,
 
 180         SKD_CHECK_STATUS_REPORT_ERROR,
 
 181         SKD_CHECK_STATUS_BUSY_IMMINENT,
 
 185         struct fit_msg_hdr      fmh;
 
 186         struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
 
 189 struct skd_fitmsg_context {
 
 190         enum skd_fit_msg_state state;
 
 192         struct skd_fitmsg_context *next;
 
 199         struct skd_msg_buf *msg_buf;
 
 200         dma_addr_t mb_dma_address;
 
 203 struct skd_request_context {
 
 204         enum skd_req_state state;
 
 206         struct skd_request_context *next;
 
 215         enum dma_data_direction data_dir;
 
 216         struct scatterlist *sg;
 
 220         struct fit_sg_descriptor *sksg_list;
 
 221         dma_addr_t sksg_dma_address;
 
 223         struct fit_completion_entry_v1 completion;
 
 225         struct fit_comp_error_info err_info;
 
 229 struct skd_special_context {
 
 230         struct skd_request_context req;
 
 235         dma_addr_t db_dma_address;
 
 237         struct skd_msg_buf *msg_buf;
 
 238         dma_addr_t mb_dma_address;
 
 251         struct sg_iovec *iov;
 
 252         struct sg_iovec no_iov_iov;
 
 254         struct skd_special_context *skspcl;
 
 257 typedef enum skd_irq_type {
 
 263 #define SKD_MAX_BARS                    2
 
 266         void __iomem *mem_map[SKD_MAX_BARS];
 
 267         resource_size_t mem_phys[SKD_MAX_BARS];
 
 268         u32 mem_size[SKD_MAX_BARS];
 
 270         struct skd_msix_entry *msix_entries;
 
 272         struct pci_dev *pdev;
 
 273         int pcie_error_reporting_is_enabled;
 
 276         struct gendisk *disk;
 
 277         struct request_queue *queue;
 
 278         struct device *class_dev;
 
 286         enum skd_drvr_state state;
 
 290         u32 cur_max_queue_depth;
 
 291         u32 queue_low_water_mark;
 
 292         u32 dev_max_queue_depth;
 
 294         u32 num_fitmsg_context;
 
 297         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
 
 299         struct skd_fitmsg_context *skmsg_free_list;
 
 300         struct skd_fitmsg_context *skmsg_table;
 
 302         struct skd_request_context *skreq_free_list;
 
 303         struct skd_request_context *skreq_table;
 
 305         struct skd_special_context *skspcl_free_list;
 
 306         struct skd_special_context *skspcl_table;
 
 308         struct skd_special_context internal_skspcl;
 
 309         u32 read_cap_blocksize;
 
 310         u32 read_cap_last_lba;
 
 311         int read_cap_is_valid;
 
 312         int inquiry_is_valid;
 
 313         u8 inq_serial_num[13];  /*12 chars plus null term */
 
 317         struct fit_completion_entry_v1 *skcomp_table;
 
 318         struct fit_comp_error_info *skerr_table;
 
 319         dma_addr_t cq_dma_address;
 
 321         wait_queue_head_t waitq;
 
 323         struct timer_list timer;
 
 334         u32 connect_time_stamp;
 
 336 #define SKD_MAX_CONNECT_RETRIES 16
 
 341         struct work_struct completion_worker;
 
 344 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
 
 345 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
 
 346 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
 
 348 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
 
 350         u32 val = readl(skdev->mem_map[1] + offset);
 
 352         if (unlikely(skdev->dbg_level >= 2))
 
 353                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
 
 357 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
 
 360         writel(val, skdev->mem_map[1] + offset);
 
 361         if (unlikely(skdev->dbg_level >= 2))
 
 362                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
 
 365 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
 
 368         writeq(val, skdev->mem_map[1] + offset);
 
 369         if (unlikely(skdev->dbg_level >= 2))
 
 370                 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
 
 375 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
 
 376 static int skd_isr_type = SKD_IRQ_DEFAULT;
 
 378 module_param(skd_isr_type, int, 0444);
 
 379 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
 
 380                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
 
 382 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
 
 383 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
 
 385 module_param(skd_max_req_per_msg, int, 0444);
 
 386 MODULE_PARM_DESC(skd_max_req_per_msg,
 
 387                  "Maximum SCSI requests packed in a single message."
 
 388                  " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
 
 390 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
 
 391 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
 
 392 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
 
 394 module_param(skd_max_queue_depth, int, 0444);
 
 395 MODULE_PARM_DESC(skd_max_queue_depth,
 
 396                  "Maximum SCSI requests issued to s1120."
 
 397                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
 
 399 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
 
 400 module_param(skd_sgs_per_request, int, 0444);
 
 401 MODULE_PARM_DESC(skd_sgs_per_request,
 
 402                  "Maximum SG elements per block request."
 
 403                  " (1-4096, default==256)");
 
 405 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
 
 406 module_param(skd_max_pass_thru, int, 0444);
 
 407 MODULE_PARM_DESC(skd_max_pass_thru,
 
 408                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
 
 410 module_param(skd_dbg_level, int, 0444);
 
 411 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
 
 413 module_param(skd_isr_comp_limit, int, 0444);
 
 414 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
 
 416 /* Major device number dynamically assigned. */
 
 417 static u32 skd_major;
 
 419 static void skd_destruct(struct skd_device *skdev);
 
 420 static const struct block_device_operations skd_blockdev_ops;
 
 421 static void skd_send_fitmsg(struct skd_device *skdev,
 
 422                             struct skd_fitmsg_context *skmsg);
 
 423 static void skd_send_special_fitmsg(struct skd_device *skdev,
 
 424                                     struct skd_special_context *skspcl);
 
 425 static void skd_request_fn(struct request_queue *rq);
 
 426 static void skd_end_request(struct skd_device *skdev,
 
 427                 struct skd_request_context *skreq, blk_status_t status);
 
 428 static bool skd_preop_sg_list(struct skd_device *skdev,
 
 429                              struct skd_request_context *skreq);
 
 430 static void skd_postop_sg_list(struct skd_device *skdev,
 
 431                                struct skd_request_context *skreq);
 
 433 static void skd_restart_device(struct skd_device *skdev);
 
 434 static int skd_quiesce_dev(struct skd_device *skdev);
 
 435 static int skd_unquiesce_dev(struct skd_device *skdev);
 
 436 static void skd_release_special(struct skd_device *skdev,
 
 437                                 struct skd_special_context *skspcl);
 
 438 static void skd_disable_interrupts(struct skd_device *skdev);
 
 439 static void skd_isr_fwstate(struct skd_device *skdev);
 
 440 static void skd_recover_requests(struct skd_device *skdev);
 
 441 static void skd_soft_reset(struct skd_device *skdev);
 
 443 const char *skd_drive_state_to_str(int state);
 
 444 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
 
 445 static void skd_log_skdev(struct skd_device *skdev, const char *event);
 
 446 static void skd_log_skmsg(struct skd_device *skdev,
 
 447                           struct skd_fitmsg_context *skmsg, const char *event);
 
 448 static void skd_log_skreq(struct skd_device *skdev,
 
 449                           struct skd_request_context *skreq, const char *event);
 
 452  *****************************************************************************
 
 453  * READ/WRITE REQUESTS
 
 454  *****************************************************************************
 
 456 static void skd_fail_all_pending(struct skd_device *skdev)
 
 458         struct request_queue *q = skdev->queue;
 
 462                 req = blk_peek_request(q);
 
 465                 blk_start_request(req);
 
 466                 __blk_end_request_all(req, BLK_STS_IOERR);
 
 471 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
 
 472                 int data_dir, unsigned lba,
 
 475         if (data_dir == READ)
 
 476                 scsi_req->cdb[0] = READ_10;
 
 478                 scsi_req->cdb[0] = WRITE_10;
 
 480         scsi_req->cdb[1] = 0;
 
 481         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
 
 482         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
 
 483         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
 
 484         scsi_req->cdb[5] = (lba & 0xff);
 
 485         scsi_req->cdb[6] = 0;
 
 486         scsi_req->cdb[7] = (count & 0xff00) >> 8;
 
 487         scsi_req->cdb[8] = count & 0xff;
 
 488         scsi_req->cdb[9] = 0;
 
 492 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
 
 493                             struct skd_request_context *skreq)
 
 495         skreq->flush_cmd = 1;
 
 497         scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
 
 498         scsi_req->cdb[1] = 0;
 
 499         scsi_req->cdb[2] = 0;
 
 500         scsi_req->cdb[3] = 0;
 
 501         scsi_req->cdb[4] = 0;
 
 502         scsi_req->cdb[5] = 0;
 
 503         scsi_req->cdb[6] = 0;
 
 504         scsi_req->cdb[7] = 0;
 
 505         scsi_req->cdb[8] = 0;
 
 506         scsi_req->cdb[9] = 0;
 
 510  * Return true if and only if all pending requests should be failed.
 
 512 static bool skd_fail_all(struct request_queue *q)
 
 514         struct skd_device *skdev = q->queuedata;
 
 516         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
 
 518         skd_log_skdev(skdev, "req_not_online");
 
 519         switch (skdev->state) {
 
 520         case SKD_DRVR_STATE_PAUSING:
 
 521         case SKD_DRVR_STATE_PAUSED:
 
 522         case SKD_DRVR_STATE_STARTING:
 
 523         case SKD_DRVR_STATE_RESTARTING:
 
 524         case SKD_DRVR_STATE_WAIT_BOOT:
 
 525         /* In case of starting, we haven't started the queue,
 
 526          * so we can't get here... but requests are
 
 527          * possibly hanging out waiting for us because we
 
 528          * reported the dev/skd0 already.  They'll wait
 
 529          * forever if connect doesn't complete.
 
 530          * What to do??? delay dev/skd0 ??
 
 532         case SKD_DRVR_STATE_BUSY:
 
 533         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
 534         case SKD_DRVR_STATE_BUSY_ERASE:
 
 535         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 
 538         case SKD_DRVR_STATE_BUSY_SANITIZE:
 
 539         case SKD_DRVR_STATE_STOPPING:
 
 540         case SKD_DRVR_STATE_SYNCING:
 
 541         case SKD_DRVR_STATE_FAULT:
 
 542         case SKD_DRVR_STATE_DISAPPEARED:
 
 548 static void skd_request_fn(struct request_queue *q)
 
 550         struct skd_device *skdev = q->queuedata;
 
 551         struct skd_fitmsg_context *skmsg = NULL;
 
 552         struct fit_msg_hdr *fmh = NULL;
 
 553         struct skd_request_context *skreq;
 
 554         struct request *req = NULL;
 
 555         struct skd_scsi_request *scsi_req;
 
 556         unsigned long io_flags;
 
 565         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
 
 567                         skd_fail_all_pending(skdev);
 
 571         if (blk_queue_stopped(skdev->queue)) {
 
 572                 if (skdev->skmsg_free_list == NULL ||
 
 573                     skdev->skreq_free_list == NULL ||
 
 574                     skdev->in_flight >= skdev->queue_low_water_mark)
 
 575                         /* There is still some kind of shortage */
 
 578                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
 
 583          *  - There are no more native requests
 
 584          *  - There are already the maximum number of requests in progress
 
 585          *  - There are no more skd_request_context entries
 
 586          *  - There are no more FIT msg buffers
 
 592                 req = blk_peek_request(q);
 
 594                 /* Are there any native requests to start? */
 
 598                 lba = (u32)blk_rq_pos(req);
 
 599                 count = blk_rq_sectors(req);
 
 600                 data_dir = rq_data_dir(req);
 
 601                 io_flags = req->cmd_flags;
 
 603                 if (req_op(req) == REQ_OP_FLUSH)
 
 606                 if (io_flags & REQ_FUA)
 
 609                 dev_dbg(&skdev->pdev->dev,
 
 610                         "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
 
 611                         req, lba, lba, count, count, data_dir);
 
 613                 /* At this point we know there is a request */
 
 615                 /* Are too many requets already in progress? */
 
 616                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
 
 617                         dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
 
 618                                 skdev->in_flight, skdev->cur_max_queue_depth);
 
 622                 /* Is a skd_request_context available? */
 
 623                 skreq = skdev->skreq_free_list;
 
 625                         dev_dbg(&skdev->pdev->dev, "Out of req=%p\n", q);
 
 628                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 
 629                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
 
 631                 /* Now we check to see if we can get a fit msg */
 
 633                         if (skdev->skmsg_free_list == NULL) {
 
 634                                 dev_dbg(&skdev->pdev->dev, "Out of msg\n");
 
 639                 skreq->flush_cmd = 0;
 
 641                 skreq->sg_byte_count = 0;
 
 644                  * OK to now dequeue request from q.
 
 646                  * At this point we are comitted to either start or reject
 
 647                  * the native request. Note that skd_request_context is
 
 648                  * available but is still at the head of the free list.
 
 650                 blk_start_request(req);
 
 652                 skreq->fitmsg_id = 0;
 
 654                 skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
 
 657                 if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
 
 658                         dev_dbg(&skdev->pdev->dev, "error Out\n");
 
 659                         skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
 
 663                 /* Either a FIT msg is in progress or we have to start one. */
 
 665                         /* Are there any FIT msg buffers available? */
 
 666                         skmsg = skdev->skmsg_free_list;
 
 668                                 dev_dbg(&skdev->pdev->dev,
 
 669                                         "Out of msg skdev=%p\n",
 
 673                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
 
 674                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
 
 676                         skdev->skmsg_free_list = skmsg->next;
 
 678                         skmsg->state = SKD_MSG_STATE_BUSY;
 
 679                         skmsg->id += SKD_ID_INCR;
 
 681                         /* Initialize the FIT msg header */
 
 682                         fmh = &skmsg->msg_buf->fmh;
 
 683                         memset(fmh, 0, sizeof(*fmh));
 
 684                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 
 685                         skmsg->length = sizeof(*fmh);
 
 688                 skreq->fitmsg_id = skmsg->id;
 
 691                         &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
 
 692                 memset(scsi_req, 0, sizeof(*scsi_req));
 
 694                 be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
 
 695                 cmdctxt = skreq->id + SKD_ID_INCR;
 
 697                 scsi_req->hdr.tag = cmdctxt;
 
 698                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
 
 700                 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
 
 701                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
 
 702                         SKD_ASSERT(skreq->flush_cmd == 1);
 
 704                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
 
 708                         scsi_req->cdb[1] |= SKD_FUA_NV;
 
 710                 scsi_req->hdr.sg_list_len_bytes =
 
 711                         cpu_to_be32(skreq->sg_byte_count);
 
 713                 /* Complete resource allocations. */
 
 714                 skdev->skreq_free_list = skreq->next;
 
 715                 skreq->state = SKD_REQ_STATE_BUSY;
 
 716                 skreq->id += SKD_ID_INCR;
 
 718                 skmsg->length += sizeof(struct skd_scsi_request);
 
 719                 fmh->num_protocol_cmds_coalesced++;
 
 722                  * Update the active request counts.
 
 723                  * Capture the timeout timestamp.
 
 725                 skreq->timeout_stamp = skdev->timeout_stamp;
 
 726                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 
 727                 skdev->timeout_slot[timo_slot]++;
 
 729                 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
 
 733                  * If the FIT msg buffer is full send it.
 
 735                 if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 
 736                         skd_send_fitmsg(skdev, skmsg);
 
 742         /* If the FIT msg buffer is not empty send what we got. */
 
 744                 WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
 
 745                 skd_send_fitmsg(skdev, skmsg);
 
 751          * If req is non-NULL it means there is something to do but
 
 752          * we are out of a resource.
 
 755                 blk_stop_queue(skdev->queue);
 
 758 static void skd_end_request(struct skd_device *skdev,
 
 759                 struct skd_request_context *skreq, blk_status_t error)
 
 761         if (unlikely(error)) {
 
 762                 struct request *req = skreq->req;
 
 763                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
 
 764                 u32 lba = (u32)blk_rq_pos(req);
 
 765                 u32 count = blk_rq_sectors(req);
 
 767                 dev_err(&skdev->pdev->dev,
 
 768                         "Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
 
 771                 dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
 
 774         __blk_end_request_all(skreq->req, error);
 
 777 static bool skd_preop_sg_list(struct skd_device *skdev,
 
 778                              struct skd_request_context *skreq)
 
 780         struct request *req = skreq->req;
 
 781         struct scatterlist *sgl = &skreq->sg[0], *sg;
 
 785         skreq->sg_byte_count = 0;
 
 787         WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
 
 788                      skreq->data_dir != DMA_FROM_DEVICE);
 
 790         n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
 
 795          * Map scatterlist to PCI bus addresses.
 
 796          * Note PCI might change the number of entries.
 
 798         n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
 
 802         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 
 806         for_each_sg(sgl, sg, n_sg, i) {
 
 807                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 
 808                 u32 cnt = sg_dma_len(sg);
 
 809                 uint64_t dma_addr = sg_dma_address(sg);
 
 811                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
 
 812                 sgd->byte_count = cnt;
 
 813                 skreq->sg_byte_count += cnt;
 
 814                 sgd->host_side_addr = dma_addr;
 
 815                 sgd->dev_side_addr = 0;
 
 818         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
 
 819         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
 
 821         if (unlikely(skdev->dbg_level > 1)) {
 
 822                 dev_dbg(&skdev->pdev->dev,
 
 823                         "skreq=%x sksg_list=%p sksg_dma=%llx\n",
 
 824                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
 
 825                 for (i = 0; i < n_sg; i++) {
 
 826                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 
 828                         dev_dbg(&skdev->pdev->dev,
 
 829                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
 
 830                                 i, sgd->byte_count, sgd->control,
 
 831                                 sgd->host_side_addr, sgd->next_desc_ptr);
 
 838 static void skd_postop_sg_list(struct skd_device *skdev,
 
 839                                struct skd_request_context *skreq)
 
 842          * restore the next ptr for next IO request so we
 
 843          * don't have to set it every time.
 
 845         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
 
 846                 skreq->sksg_dma_address +
 
 847                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
 
 848         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
 
 852  *****************************************************************************
 
 854  *****************************************************************************
 
 857 static void skd_timer_tick_not_online(struct skd_device *skdev);
 
 859 static void skd_timer_tick(ulong arg)
 
 861         struct skd_device *skdev = (struct skd_device *)arg;
 
 864         unsigned long reqflags;
 
 867         if (skdev->state == SKD_DRVR_STATE_FAULT)
 
 868                 /* The driver has declared fault, and we want it to
 
 869                  * stay that way until driver is reloaded.
 
 873         spin_lock_irqsave(&skdev->lock, reqflags);
 
 875         state = SKD_READL(skdev, FIT_STATUS);
 
 876         state &= FIT_SR_DRIVE_STATE_MASK;
 
 877         if (state != skdev->drive_state)
 
 878                 skd_isr_fwstate(skdev);
 
 880         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
 
 881                 skd_timer_tick_not_online(skdev);
 
 884         skdev->timeout_stamp++;
 
 885         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 
 888          * All requests that happened during the previous use of
 
 889          * this slot should be done by now. The previous use was
 
 890          * over 7 seconds ago.
 
 892         if (skdev->timeout_slot[timo_slot] == 0)
 
 895         /* Something is overdue */
 
 896         dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
 
 897                 skdev->timeout_slot[timo_slot], skdev->in_flight);
 
 898         dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
 
 899                 skdev->timeout_slot[timo_slot], skdev->in_flight);
 
 901         skdev->timer_countdown = SKD_DRAINING_TIMO;
 
 902         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
 
 903         skdev->timo_slot = timo_slot;
 
 904         blk_stop_queue(skdev->queue);
 
 907         mod_timer(&skdev->timer, (jiffies + HZ));
 
 909         spin_unlock_irqrestore(&skdev->lock, reqflags);
 
 912 static void skd_timer_tick_not_online(struct skd_device *skdev)
 
 914         switch (skdev->state) {
 
 915         case SKD_DRVR_STATE_IDLE:
 
 916         case SKD_DRVR_STATE_LOAD:
 
 918         case SKD_DRVR_STATE_BUSY_SANITIZE:
 
 919                 dev_dbg(&skdev->pdev->dev,
 
 920                         "drive busy sanitize[%x], driver[%x]\n",
 
 921                         skdev->drive_state, skdev->state);
 
 922                 /* If we've been in sanitize for 3 seconds, we figure we're not
 
 923                  * going to get anymore completions, so recover requests now
 
 925                 if (skdev->timer_countdown > 0) {
 
 926                         skdev->timer_countdown--;
 
 929                 skd_recover_requests(skdev);
 
 932         case SKD_DRVR_STATE_BUSY:
 
 933         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
 934         case SKD_DRVR_STATE_BUSY_ERASE:
 
 935                 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
 
 936                         skdev->state, skdev->timer_countdown);
 
 937                 if (skdev->timer_countdown > 0) {
 
 938                         skdev->timer_countdown--;
 
 941                 dev_dbg(&skdev->pdev->dev,
 
 942                         "busy[%x], timedout=%d, restarting device.",
 
 943                         skdev->state, skdev->timer_countdown);
 
 944                 skd_restart_device(skdev);
 
 947         case SKD_DRVR_STATE_WAIT_BOOT:
 
 948         case SKD_DRVR_STATE_STARTING:
 
 949                 if (skdev->timer_countdown > 0) {
 
 950                         skdev->timer_countdown--;
 
 953                 /* For now, we fault the drive.  Could attempt resets to
 
 954                  * revcover at some point. */
 
 955                 skdev->state = SKD_DRVR_STATE_FAULT;
 
 957                 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
 
 960                 /*start the queue so we can respond with error to requests */
 
 961                 /* wakeup anyone waiting for startup complete */
 
 962                 blk_start_queue(skdev->queue);
 
 963                 skdev->gendisk_on = -1;
 
 964                 wake_up_interruptible(&skdev->waitq);
 
 967         case SKD_DRVR_STATE_ONLINE:
 
 968                 /* shouldn't get here. */
 
 971         case SKD_DRVR_STATE_PAUSING:
 
 972         case SKD_DRVR_STATE_PAUSED:
 
 975         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 
 976                 dev_dbg(&skdev->pdev->dev,
 
 977                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
 
 978                         skdev->timo_slot, skdev->timer_countdown,
 
 980                         skdev->timeout_slot[skdev->timo_slot]);
 
 981                 /* if the slot has cleared we can let the I/O continue */
 
 982                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
 
 983                         dev_dbg(&skdev->pdev->dev,
 
 984                                 "Slot drained, starting queue.\n");
 
 985                         skdev->state = SKD_DRVR_STATE_ONLINE;
 
 986                         blk_start_queue(skdev->queue);
 
 989                 if (skdev->timer_countdown > 0) {
 
 990                         skdev->timer_countdown--;
 
 993                 skd_restart_device(skdev);
 
 996         case SKD_DRVR_STATE_RESTARTING:
 
 997                 if (skdev->timer_countdown > 0) {
 
 998                         skdev->timer_countdown--;
 
1001                 /* For now, we fault the drive. Could attempt resets to
 
1002                  * revcover at some point. */
 
1003                 skdev->state = SKD_DRVR_STATE_FAULT;
 
1004                 dev_err(&skdev->pdev->dev,
 
1005                         "DriveFault Reconnect Timeout (%x)\n",
 
1006                         skdev->drive_state);
 
1009                  * Recovering does two things:
 
1010                  * 1. completes IO with error
 
1011                  * 2. reclaims dma resources
 
1012                  * When is it safe to recover requests?
 
1013                  * - if the drive state is faulted
 
1014                  * - if the state is still soft reset after out timeout
 
1015                  * - if the drive registers are dead (state = FF)
 
1016                  * If it is "unsafe", we still need to recover, so we will
 
1017                  * disable pci bus mastering and disable our interrupts.
 
1020                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
 
1021                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
 
1022                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
 
1023                         /* It never came out of soft reset. Try to
 
1024                          * recover the requests and then let them
 
1025                          * fail. This is to mitigate hung processes. */
 
1026                         skd_recover_requests(skdev);
 
1028                         dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
 
1029                                 skdev->drive_state);
 
1030                         pci_disable_device(skdev->pdev);
 
1031                         skd_disable_interrupts(skdev);
 
1032                         skd_recover_requests(skdev);
 
1035                 /*start the queue so we can respond with error to requests */
 
1036                 /* wakeup anyone waiting for startup complete */
 
1037                 blk_start_queue(skdev->queue);
 
1038                 skdev->gendisk_on = -1;
 
1039                 wake_up_interruptible(&skdev->waitq);
 
1042         case SKD_DRVR_STATE_RESUMING:
 
1043         case SKD_DRVR_STATE_STOPPING:
 
1044         case SKD_DRVR_STATE_SYNCING:
 
1045         case SKD_DRVR_STATE_FAULT:
 
1046         case SKD_DRVR_STATE_DISAPPEARED:
 
1052 static int skd_start_timer(struct skd_device *skdev)
 
1056         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
 
1058         rc = mod_timer(&skdev->timer, (jiffies + HZ));
 
1060                 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
 
1064 static void skd_kill_timer(struct skd_device *skdev)
 
1066         del_timer_sync(&skdev->timer);
 
1070  *****************************************************************************
 
1072  *****************************************************************************
 
1074 static int skd_ioctl_sg_io(struct skd_device *skdev,
 
1075                            fmode_t mode, void __user *argp);
 
1076 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
 
1077                                         struct skd_sg_io *sksgio);
 
1078 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
 
1079                                    struct skd_sg_io *sksgio);
 
1080 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
 
1081                                     struct skd_sg_io *sksgio);
 
1082 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
 
1083                                  struct skd_sg_io *sksgio, int dxfer_dir);
 
1084 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
 
1085                                  struct skd_sg_io *sksgio);
 
1086 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
 
1087 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
 
1088                                     struct skd_sg_io *sksgio);
 
1089 static int skd_sg_io_put_status(struct skd_device *skdev,
 
1090                                 struct skd_sg_io *sksgio);
 
1092 static void skd_complete_special(struct skd_device *skdev,
 
1093                                  struct fit_completion_entry_v1 *skcomp,
 
1094                                  struct fit_comp_error_info *skerr,
 
1095                                  struct skd_special_context *skspcl);
 
1097 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
 
1098                           uint cmd_in, ulong arg)
 
1100         static const int sg_version_num = 30527;
 
1101         int rc = 0, timeout;
 
1102         struct gendisk *disk = bdev->bd_disk;
 
1103         struct skd_device *skdev = disk->private_data;
 
1104         int __user *p = (int __user *)arg;
 
1106         dev_dbg(&skdev->pdev->dev,
 
1107                 "%s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
 
1108                 disk->disk_name, current->comm, mode, cmd_in, arg);
 
1110         if (!capable(CAP_SYS_ADMIN))
 
1114         case SG_SET_TIMEOUT:
 
1115                 rc = get_user(timeout, p);
 
1117                         disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
 
1119         case SG_GET_TIMEOUT:
 
1120                 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
 
1122         case SG_GET_VERSION_NUM:
 
1123                 rc = put_user(sg_version_num, p);
 
1126                 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
 
1134         dev_dbg(&skdev->pdev->dev, "%s:  completion rc %d\n", disk->disk_name,
 
1139 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
 
1143         struct skd_sg_io sksgio;
 
1145         memset(&sksgio, 0, sizeof(sksgio));
 
1148         sksgio.iov = &sksgio.no_iov_iov;
 
1150         switch (skdev->state) {
 
1151         case SKD_DRVR_STATE_ONLINE:
 
1152         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
1156                 dev_dbg(&skdev->pdev->dev, "drive not online\n");
 
1161         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
 
1165         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
 
1169         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
 
1173         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
 
1177         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
 
1181         rc = skd_sg_io_await(skdev, &sksgio);
 
1185         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
 
1189         rc = skd_sg_io_put_status(skdev, &sksgio);
 
1196         skd_sg_io_release_skspcl(skdev, &sksgio);
 
1198         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
 
1203 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
 
1204                                         struct skd_sg_io *sksgio)
 
1206         struct sg_io_hdr *sgp = &sksgio->sg;
 
1207         int i, __maybe_unused acc;
 
1209         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
 
1210                 dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",
 
1215         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
 
1216                 dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",
 
1221         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
 
1222                 dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",
 
1227         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
 
1228                 dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",
 
1233         if (sgp->iovec_count > 256) {
 
1234                 dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",
 
1239         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
 
1240                 dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",
 
1245         switch (sgp->dxfer_direction) {
 
1250         case SG_DXFER_TO_DEV:
 
1254         case SG_DXFER_FROM_DEV:
 
1255         case SG_DXFER_TO_FROM_DEV:
 
1260                 dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
 
1261                         sgp->dxfer_direction);
 
1265         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
 
1266                 dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",
 
1271         if (sgp->mx_sb_len != 0) {
 
1272                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
 
1273                         dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",
 
1279         if (sgp->iovec_count == 0) {
 
1280                 sksgio->iov[0].iov_base = sgp->dxferp;
 
1281                 sksgio->iov[0].iov_len = sgp->dxfer_len;
 
1283                 sksgio->dxfer_len = sgp->dxfer_len;
 
1285                 struct sg_iovec *iov;
 
1286                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
 
1287                 size_t iov_data_len;
 
1289                 iov = kmalloc(nbytes, GFP_KERNEL);
 
1291                         dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",
 
1296                 sksgio->iovcnt = sgp->iovec_count;
 
1298                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
 
1299                         dev_dbg(&skdev->pdev->dev,
 
1300                                 "copy_from_user iovec failed %p\n",
 
1306                  * Sum up the vecs, making sure they don't overflow
 
1309                 for (i = 0; i < sgp->iovec_count; i++) {
 
1310                         if (iov_data_len + iov[i].iov_len < iov_data_len)
 
1312                         iov_data_len += iov[i].iov_len;
 
1315                 /* SG_IO howto says that the shorter of the two wins */
 
1316                 if (sgp->dxfer_len < iov_data_len) {
 
1317                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
 
1320                         sksgio->dxfer_len = sgp->dxfer_len;
 
1322                         sksgio->dxfer_len = iov_data_len;
 
1325         if (sgp->dxfer_direction != SG_DXFER_NONE) {
 
1326                 struct sg_iovec *iov = sksgio->iov;
 
1327                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
 
1328                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
 
1329                                 dev_dbg(&skdev->pdev->dev,
 
1330                                         "access data failed %p/%zd\n",
 
1331                                         iov->iov_base, iov->iov_len);
 
1340 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
 
1341                                    struct skd_sg_io *sksgio)
 
1343         struct skd_special_context *skspcl = NULL;
 
1349                 spin_lock_irqsave(&skdev->lock, flags);
 
1350                 skspcl = skdev->skspcl_free_list;
 
1351                 if (skspcl != NULL) {
 
1352                         skdev->skspcl_free_list =
 
1353                                 (struct skd_special_context *)skspcl->req.next;
 
1354                         skspcl->req.id += SKD_ID_INCR;
 
1355                         skspcl->req.state = SKD_REQ_STATE_SETUP;
 
1356                         skspcl->orphaned = 0;
 
1357                         skspcl->req.n_sg = 0;
 
1359                 spin_unlock_irqrestore(&skdev->lock, flags);
 
1361                 if (skspcl != NULL) {
 
1366                 dev_dbg(&skdev->pdev->dev, "blocking\n");
 
1368                 rc = wait_event_interruptible_timeout(
 
1370                                 (skdev->skspcl_free_list != NULL),
 
1371                                 msecs_to_jiffies(sksgio->sg.timeout));
 
1373                 dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);
 
1383                  * If we get here rc > 0 meaning the timeout to
 
1384                  * wait_event_interruptible_timeout() had time left, hence the
 
1385                  * sought event -- non-empty free list -- happened.
 
1386                  * Retry the allocation.
 
1389         sksgio->skspcl = skspcl;
 
1394 static int skd_skreq_prep_buffering(struct skd_device *skdev,
 
1395                                     struct skd_request_context *skreq,
 
1398         u32 resid = dxfer_len;
 
1401          * The DMA engine must have aligned addresses and byte counts.
 
1403         resid += (-resid) & 3;
 
1404         skreq->sg_byte_count = resid;
 
1409                 u32 nbytes = PAGE_SIZE;
 
1410                 u32 ix = skreq->n_sg;
 
1411                 struct scatterlist *sg = &skreq->sg[ix];
 
1412                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
 
1418                 page = alloc_page(GFP_KERNEL);
 
1422                 sg_set_page(sg, page, nbytes, 0);
 
1424                 /* TODO: This should be going through a pci_???()
 
1425                  * routine to do proper mapping. */
 
1426                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
 
1427                 sksg->byte_count = nbytes;
 
1429                 sksg->host_side_addr = sg_phys(sg);
 
1431                 sksg->dev_side_addr = 0;
 
1432                 sksg->next_desc_ptr = skreq->sksg_dma_address +
 
1433                                       (ix + 1) * sizeof(*sksg);
 
1439         if (skreq->n_sg > 0) {
 
1440                 u32 ix = skreq->n_sg - 1;
 
1441                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
 
1443                 sksg->control = FIT_SGD_CONTROL_LAST;
 
1444                 sksg->next_desc_ptr = 0;
 
1447         if (unlikely(skdev->dbg_level > 1)) {
 
1450                 dev_dbg(&skdev->pdev->dev,
 
1451                         "skreq=%x sksg_list=%p sksg_dma=%llx\n",
 
1452                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
 
1453                 for (i = 0; i < skreq->n_sg; i++) {
 
1454                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 
1456                         dev_dbg(&skdev->pdev->dev,
 
1457                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
 
1458                                 i, sgd->byte_count, sgd->control,
 
1459                                 sgd->host_side_addr, sgd->next_desc_ptr);
 
1466 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
 
1467                                     struct skd_sg_io *sksgio)
 
1469         struct skd_special_context *skspcl = sksgio->skspcl;
 
1470         struct skd_request_context *skreq = &skspcl->req;
 
1471         u32 dxfer_len = sksgio->dxfer_len;
 
1474         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
 
1476          * Eventually, errors or not, skd_release_special() is called
 
1477          * to recover allocations including partial allocations.
 
1482 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
 
1483                                  struct skd_sg_io *sksgio, int dxfer_dir)
 
1485         struct skd_special_context *skspcl = sksgio->skspcl;
 
1487         struct sg_iovec curiov;
 
1491         u32 resid = sksgio->dxfer_len;
 
1495         curiov.iov_base = NULL;
 
1497         if (dxfer_dir != sksgio->sg.dxfer_direction) {
 
1498                 if (dxfer_dir != SG_DXFER_TO_DEV ||
 
1499                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
 
1504                 u32 nbytes = PAGE_SIZE;
 
1506                 if (curiov.iov_len == 0) {
 
1507                         curiov = sksgio->iov[iov_ix++];
 
1513                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
 
1514                         bufp = page_address(page);
 
1515                         buf_len = PAGE_SIZE;
 
1518                 nbytes = min_t(u32, nbytes, resid);
 
1519                 nbytes = min_t(u32, nbytes, curiov.iov_len);
 
1520                 nbytes = min_t(u32, nbytes, buf_len);
 
1522                 if (dxfer_dir == SG_DXFER_TO_DEV)
 
1523                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
 
1525                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
 
1531                 curiov.iov_len -= nbytes;
 
1532                 curiov.iov_base += nbytes;
 
1539 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
 
1540                                  struct skd_sg_io *sksgio)
 
1542         struct skd_special_context *skspcl = sksgio->skspcl;
 
1543         struct fit_msg_hdr *fmh = &skspcl->msg_buf->fmh;
 
1544         struct skd_scsi_request *scsi_req = &skspcl->msg_buf->scsi[0];
 
1546         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
 
1548         /* Initialize the FIT msg header */
 
1549         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 
1550         fmh->num_protocol_cmds_coalesced = 1;
 
1552         /* Initialize the SCSI request */
 
1553         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
 
1554                 scsi_req->hdr.sg_list_dma_address =
 
1555                         cpu_to_be64(skspcl->req.sksg_dma_address);
 
1556         scsi_req->hdr.tag = skspcl->req.id;
 
1557         scsi_req->hdr.sg_list_len_bytes =
 
1558                 cpu_to_be32(skspcl->req.sg_byte_count);
 
1559         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
 
1561         skspcl->req.state = SKD_REQ_STATE_BUSY;
 
1562         skd_send_special_fitmsg(skdev, skspcl);
 
1567 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
 
1569         unsigned long flags;
 
1572         rc = wait_event_interruptible_timeout(skdev->waitq,
 
1573                                               (sksgio->skspcl->req.state !=
 
1574                                                SKD_REQ_STATE_BUSY),
 
1575                                               msecs_to_jiffies(sksgio->sg.
 
1578         spin_lock_irqsave(&skdev->lock, flags);
 
1580         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
 
1581                 dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",
 
1584                 /* Build check cond, sense and let command finish. */
 
1585                 /* For a timeout, we must fabricate completion and sense
 
1586                  * data to complete the command */
 
1587                 sksgio->skspcl->req.completion.status =
 
1588                         SAM_STAT_CHECK_CONDITION;
 
1590                 memset(&sksgio->skspcl->req.err_info, 0,
 
1591                        sizeof(sksgio->skspcl->req.err_info));
 
1592                 sksgio->skspcl->req.err_info.type = 0x70;
 
1593                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
 
1594                 sksgio->skspcl->req.err_info.code = 0x44;
 
1595                 sksgio->skspcl->req.err_info.qual = 0;
 
1597         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
 
1598                 /* No longer on the adapter. We finish. */
 
1601                 /* Something's gone wrong. Still busy. Timeout or
 
1602                  * user interrupted (control-C). Mark as an orphan
 
1603                  * so it will be disposed when completed. */
 
1604                 sksgio->skspcl->orphaned = 1;
 
1605                 sksgio->skspcl = NULL;
 
1607                         dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
 
1608                                 sksgio, sksgio->sg.timeout);
 
1611                         dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);
 
1616         spin_unlock_irqrestore(&skdev->lock, flags);
 
1621 static int skd_sg_io_put_status(struct skd_device *skdev,
 
1622                                 struct skd_sg_io *sksgio)
 
1624         struct sg_io_hdr *sgp = &sksgio->sg;
 
1625         struct skd_special_context *skspcl = sksgio->skspcl;
 
1628         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
 
1630         sgp->status = skspcl->req.completion.status;
 
1631         resid = sksgio->dxfer_len - nb;
 
1633         sgp->masked_status = sgp->status & STATUS_MASK;
 
1634         sgp->msg_status = 0;
 
1635         sgp->host_status = 0;
 
1636         sgp->driver_status = 0;
 
1638         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
 
1639                 sgp->info |= SG_INFO_CHECK;
 
1641         dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
 
1642                 sgp->status, sgp->masked_status, sgp->resid);
 
1644         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
 
1645                 if (sgp->mx_sb_len > 0) {
 
1646                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
 
1647                         u32 nbytes = sizeof(*ei);
 
1649                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
 
1651                         sgp->sb_len_wr = nbytes;
 
1653                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
 
1654                                 dev_dbg(&skdev->pdev->dev,
 
1655                                         "copy_to_user sense failed %p\n",
 
1662         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
 
1663                 dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",
 
1671 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
 
1672                                     struct skd_sg_io *sksgio)
 
1674         struct skd_special_context *skspcl = sksgio->skspcl;
 
1676         if (skspcl != NULL) {
 
1679                 sksgio->skspcl = NULL;
 
1681                 spin_lock_irqsave(&skdev->lock, flags);
 
1682                 skd_release_special(skdev, skspcl);
 
1683                 spin_unlock_irqrestore(&skdev->lock, flags);
 
1690  *****************************************************************************
 
1691  * INTERNAL REQUESTS -- generated by driver itself
 
1692  *****************************************************************************
 
1695 static int skd_format_internal_skspcl(struct skd_device *skdev)
 
1697         struct skd_special_context *skspcl = &skdev->internal_skspcl;
 
1698         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
 
1699         struct fit_msg_hdr *fmh;
 
1700         uint64_t dma_address;
 
1701         struct skd_scsi_request *scsi;
 
1703         fmh = &skspcl->msg_buf->fmh;
 
1704         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 
1705         fmh->num_protocol_cmds_coalesced = 1;
 
1707         scsi = &skspcl->msg_buf->scsi[0];
 
1708         memset(scsi, 0, sizeof(*scsi));
 
1709         dma_address = skspcl->req.sksg_dma_address;
 
1710         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
 
1711         sgd->control = FIT_SGD_CONTROL_LAST;
 
1712         sgd->byte_count = 0;
 
1713         sgd->host_side_addr = skspcl->db_dma_address;
 
1714         sgd->dev_side_addr = 0;
 
1715         sgd->next_desc_ptr = 0LL;
 
1720 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
 
1722 static void skd_send_internal_skspcl(struct skd_device *skdev,
 
1723                                      struct skd_special_context *skspcl,
 
1726         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
 
1727         struct skd_scsi_request *scsi;
 
1728         unsigned char *buf = skspcl->data_buf;
 
1731         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
 
1733                  * A refresh is already in progress.
 
1734                  * Just wait for it to finish.
 
1738         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
 
1739         skspcl->req.state = SKD_REQ_STATE_BUSY;
 
1740         skspcl->req.id += SKD_ID_INCR;
 
1742         scsi = &skspcl->msg_buf->scsi[0];
 
1743         scsi->hdr.tag = skspcl->req.id;
 
1745         memset(scsi->cdb, 0, sizeof(scsi->cdb));
 
1748         case TEST_UNIT_READY:
 
1749                 scsi->cdb[0] = TEST_UNIT_READY;
 
1750                 sgd->byte_count = 0;
 
1751                 scsi->hdr.sg_list_len_bytes = 0;
 
1755                 scsi->cdb[0] = READ_CAPACITY;
 
1756                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
 
1757                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
 
1761                 scsi->cdb[0] = INQUIRY;
 
1762                 scsi->cdb[1] = 0x01;    /* evpd */
 
1763                 scsi->cdb[2] = 0x80;    /* serial number page */
 
1764                 scsi->cdb[4] = 0x10;
 
1765                 sgd->byte_count = 16;
 
1766                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
 
1769         case SYNCHRONIZE_CACHE:
 
1770                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
 
1771                 sgd->byte_count = 0;
 
1772                 scsi->hdr.sg_list_len_bytes = 0;
 
1776                 scsi->cdb[0] = WRITE_BUFFER;
 
1777                 scsi->cdb[1] = 0x02;
 
1778                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
 
1779                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
 
1780                 sgd->byte_count = WR_BUF_SIZE;
 
1781                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
 
1782                 /* fill incrementing byte pattern */
 
1783                 for (i = 0; i < sgd->byte_count; i++)
 
1788                 scsi->cdb[0] = READ_BUFFER;
 
1789                 scsi->cdb[1] = 0x02;
 
1790                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
 
1791                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
 
1792                 sgd->byte_count = WR_BUF_SIZE;
 
1793                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
 
1794                 memset(skspcl->data_buf, 0, sgd->byte_count);
 
1798                 SKD_ASSERT("Don't know what to send");
 
1802         skd_send_special_fitmsg(skdev, skspcl);
 
1805 static void skd_refresh_device_data(struct skd_device *skdev)
 
1807         struct skd_special_context *skspcl = &skdev->internal_skspcl;
 
1809         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
 
1812 static int skd_chk_read_buf(struct skd_device *skdev,
 
1813                             struct skd_special_context *skspcl)
 
1815         unsigned char *buf = skspcl->data_buf;
 
1818         /* check for incrementing byte pattern */
 
1819         for (i = 0; i < WR_BUF_SIZE; i++)
 
1820                 if (buf[i] != (i & 0xFF))
 
1826 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
 
1827                                  u8 code, u8 qual, u8 fruc)
 
1829         /* If the check condition is of special interest, log a message */
 
1830         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
 
1831             && (code == 0x04) && (qual == 0x06)) {
 
1832                 dev_err(&skdev->pdev->dev,
 
1833                         "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
 
1834                         key, code, qual, fruc);
 
1838 static void skd_complete_internal(struct skd_device *skdev,
 
1839                                   struct fit_completion_entry_v1 *skcomp,
 
1840                                   struct fit_comp_error_info *skerr,
 
1841                                   struct skd_special_context *skspcl)
 
1843         u8 *buf = skspcl->data_buf;
 
1846         struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
 
1848         lockdep_assert_held(&skdev->lock);
 
1850         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
 
1852         dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
 
1854         skspcl->req.completion = *skcomp;
 
1855         skspcl->req.state = SKD_REQ_STATE_IDLE;
 
1856         skspcl->req.id += SKD_ID_INCR;
 
1858         status = skspcl->req.completion.status;
 
1860         skd_log_check_status(skdev, status, skerr->key, skerr->code,
 
1861                              skerr->qual, skerr->fruc);
 
1863         switch (scsi->cdb[0]) {
 
1864         case TEST_UNIT_READY:
 
1865                 if (status == SAM_STAT_GOOD)
 
1866                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
 
1867                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
 
1868                          (skerr->key == MEDIUM_ERROR))
 
1869                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
 
1871                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
 
1872                                 dev_dbg(&skdev->pdev->dev,
 
1873                                         "TUR failed, don't send anymore state 0x%x\n",
 
1877                         dev_dbg(&skdev->pdev->dev,
 
1878                                 "**** TUR failed, retry skerr\n");
 
1879                         skd_send_internal_skspcl(skdev, skspcl,
 
1885                 if (status == SAM_STAT_GOOD)
 
1886                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
 
1888                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
 
1889                                 dev_dbg(&skdev->pdev->dev,
 
1890                                         "write buffer failed, don't send anymore state 0x%x\n",
 
1894                         dev_dbg(&skdev->pdev->dev,
 
1895                                 "**** write buffer failed, retry skerr\n");
 
1896                         skd_send_internal_skspcl(skdev, skspcl,
 
1902                 if (status == SAM_STAT_GOOD) {
 
1903                         if (skd_chk_read_buf(skdev, skspcl) == 0)
 
1904                                 skd_send_internal_skspcl(skdev, skspcl,
 
1907                                 dev_err(&skdev->pdev->dev,
 
1908                                         "*** W/R Buffer mismatch %d ***\n",
 
1909                                         skdev->connect_retries);
 
1910                                 if (skdev->connect_retries <
 
1911                                     SKD_MAX_CONNECT_RETRIES) {
 
1912                                         skdev->connect_retries++;
 
1913                                         skd_soft_reset(skdev);
 
1915                                         dev_err(&skdev->pdev->dev,
 
1916                                                 "W/R Buffer Connect Error\n");
 
1922                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
 
1923                                 dev_dbg(&skdev->pdev->dev,
 
1924                                         "read buffer failed, don't send anymore state 0x%x\n",
 
1928                         dev_dbg(&skdev->pdev->dev,
 
1929                                 "**** read buffer failed, retry skerr\n");
 
1930                         skd_send_internal_skspcl(skdev, skspcl,
 
1936                 skdev->read_cap_is_valid = 0;
 
1937                 if (status == SAM_STAT_GOOD) {
 
1938                         skdev->read_cap_last_lba =
 
1939                                 (buf[0] << 24) | (buf[1] << 16) |
 
1940                                 (buf[2] << 8) | buf[3];
 
1941                         skdev->read_cap_blocksize =
 
1942                                 (buf[4] << 24) | (buf[5] << 16) |
 
1943                                 (buf[6] << 8) | buf[7];
 
1945                         dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
 
1946                                 skdev->read_cap_last_lba,
 
1947                                 skdev->read_cap_blocksize);
 
1949                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
 
1951                         skdev->read_cap_is_valid = 1;
 
1953                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
 
1954                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
 
1955                            (skerr->key == MEDIUM_ERROR)) {
 
1956                         skdev->read_cap_last_lba = ~0;
 
1957                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
 
1958                         dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
 
1959                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
 
1961                         dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
 
1962                         skd_send_internal_skspcl(skdev, skspcl,
 
1968                 skdev->inquiry_is_valid = 0;
 
1969                 if (status == SAM_STAT_GOOD) {
 
1970                         skdev->inquiry_is_valid = 1;
 
1972                         for (i = 0; i < 12; i++)
 
1973                                 skdev->inq_serial_num[i] = buf[i + 4];
 
1974                         skdev->inq_serial_num[12] = 0;
 
1977                 if (skd_unquiesce_dev(skdev) < 0)
 
1978                         dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
 
1979                  /* connection is complete */
 
1980                 skdev->connect_retries = 0;
 
1983         case SYNCHRONIZE_CACHE:
 
1984                 if (status == SAM_STAT_GOOD)
 
1985                         skdev->sync_done = 1;
 
1987                         skdev->sync_done = -1;
 
1988                 wake_up_interruptible(&skdev->waitq);
 
1992                 SKD_ASSERT("we didn't send this");
 
1997  *****************************************************************************
 
1999  *****************************************************************************
 
2002 static void skd_send_fitmsg(struct skd_device *skdev,
 
2003                             struct skd_fitmsg_context *skmsg)
 
2006         struct fit_msg_hdr *fmh;
 
2008         dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
 
2009                 skmsg->mb_dma_address, skdev->in_flight);
 
2010         dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
 
2012         qcmd = skmsg->mb_dma_address;
 
2013         qcmd |= FIT_QCMD_QID_NORMAL;
 
2015         fmh = &skmsg->msg_buf->fmh;
 
2016         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
 
2018         if (unlikely(skdev->dbg_level > 1)) {
 
2019                 u8 *bp = (u8 *)skmsg->msg_buf;
 
2021                 for (i = 0; i < skmsg->length; i += 8) {
 
2022                         dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
 
2029         if (skmsg->length > 256)
 
2030                 qcmd |= FIT_QCMD_MSGSIZE_512;
 
2031         else if (skmsg->length > 128)
 
2032                 qcmd |= FIT_QCMD_MSGSIZE_256;
 
2033         else if (skmsg->length > 64)
 
2034                 qcmd |= FIT_QCMD_MSGSIZE_128;
 
2037                  * This makes no sense because the FIT msg header is
 
2038                  * 64 bytes. If the msg is only 64 bytes long it has
 
2041                 qcmd |= FIT_QCMD_MSGSIZE_64;
 
2043         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
 
2046         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 
2049 static void skd_send_special_fitmsg(struct skd_device *skdev,
 
2050                                     struct skd_special_context *skspcl)
 
2054         if (unlikely(skdev->dbg_level > 1)) {
 
2055                 u8 *bp = (u8 *)skspcl->msg_buf;
 
2058                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
 
2059                         dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
 
2065                 dev_dbg(&skdev->pdev->dev,
 
2066                         "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
 
2067                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
 
2068                         skspcl->req.sksg_dma_address);
 
2069                 for (i = 0; i < skspcl->req.n_sg; i++) {
 
2070                         struct fit_sg_descriptor *sgd =
 
2071                                 &skspcl->req.sksg_list[i];
 
2073                         dev_dbg(&skdev->pdev->dev,
 
2074                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
 
2075                                 i, sgd->byte_count, sgd->control,
 
2076                                 sgd->host_side_addr, sgd->next_desc_ptr);
 
2081          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
 
2082          * and one 64-byte SSDI command.
 
2084         qcmd = skspcl->mb_dma_address;
 
2085         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
 
2087         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
 
2090         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 
2094  *****************************************************************************
 
2096  *****************************************************************************
 
2099 static void skd_complete_other(struct skd_device *skdev,
 
2100                                struct fit_completion_entry_v1 *skcomp,
 
2101                                struct fit_comp_error_info *skerr);
 
2110         enum skd_check_status_action action;
 
2113 static struct sns_info skd_chkstat_table[] = {
 
2115         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
 
2116           SKD_CHECK_STATUS_REPORT_GOOD },
 
2119         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
 
2120           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
 
2121         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
 
2122           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
 
2123         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
 
2124           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
 
2126         /* Retry (with limits) */
 
2127         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
 
2128           SKD_CHECK_STATUS_REQUEUE_REQUEST },
 
2129         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
 
2130           SKD_CHECK_STATUS_REQUEUE_REQUEST },
 
2131         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
 
2132           SKD_CHECK_STATUS_REQUEUE_REQUEST },
 
2133         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
 
2134           SKD_CHECK_STATUS_REQUEUE_REQUEST },
 
2136         /* Busy (or about to be) */
 
2137         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
 
2138           SKD_CHECK_STATUS_BUSY_IMMINENT },
 
2142  * Look up status and sense data to decide how to handle the error
 
2144  * mask says which fields must match e.g., mask=0x18 means check
 
2145  * type and stat, ignore key, asc, ascq.
 
2148 static enum skd_check_status_action
 
2149 skd_check_status(struct skd_device *skdev,
 
2150                  u8 cmp_status, struct fit_comp_error_info *skerr)
 
2154         dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
 
2155                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
 
2157         dev_dbg(&skdev->pdev->dev,
 
2158                 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
 
2159                 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
 
2162         /* Does the info match an entry in the good category? */
 
2163         for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
 
2164                 struct sns_info *sns = &skd_chkstat_table[i];
 
2166                 if (sns->mask & 0x10)
 
2167                         if (skerr->type != sns->type)
 
2170                 if (sns->mask & 0x08)
 
2171                         if (cmp_status != sns->stat)
 
2174                 if (sns->mask & 0x04)
 
2175                         if (skerr->key != sns->key)
 
2178                 if (sns->mask & 0x02)
 
2179                         if (skerr->code != sns->asc)
 
2182                 if (sns->mask & 0x01)
 
2183                         if (skerr->qual != sns->ascq)
 
2186                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
 
2187                         dev_err(&skdev->pdev->dev,
 
2188                                 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
 
2189                                 skerr->key, skerr->code, skerr->qual);
 
2194         /* No other match, so nonzero status means error,
 
2195          * zero status means good
 
2198                 dev_dbg(&skdev->pdev->dev, "status check: error\n");
 
2199                 return SKD_CHECK_STATUS_REPORT_ERROR;
 
2202         dev_dbg(&skdev->pdev->dev, "status check good default\n");
 
2203         return SKD_CHECK_STATUS_REPORT_GOOD;
 
2206 static void skd_resolve_req_exception(struct skd_device *skdev,
 
2207                                       struct skd_request_context *skreq)
 
2209         u8 cmp_status = skreq->completion.status;
 
2211         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
 
2212         case SKD_CHECK_STATUS_REPORT_GOOD:
 
2213         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
 
2214                 skd_end_request(skdev, skreq, BLK_STS_OK);
 
2217         case SKD_CHECK_STATUS_BUSY_IMMINENT:
 
2218                 skd_log_skreq(skdev, skreq, "retry(busy)");
 
2219                 blk_requeue_request(skdev->queue, skreq->req);
 
2220                 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
 
2221                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
 
2222                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
 
2223                 skd_quiesce_dev(skdev);
 
2226         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
 
2227                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
 
2228                         skd_log_skreq(skdev, skreq, "retry");
 
2229                         blk_requeue_request(skdev->queue, skreq->req);
 
2234         case SKD_CHECK_STATUS_REPORT_ERROR:
 
2236                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
 
2241 /* assume spinlock is already held */
 
2242 static void skd_release_skreq(struct skd_device *skdev,
 
2243                               struct skd_request_context *skreq)
 
2246         struct skd_fitmsg_context *skmsg;
 
2251          * Reclaim the FIT msg buffer if this is
 
2252          * the first of the requests it carried to
 
2253          * be completed. The FIT msg buffer used to
 
2254          * send this request cannot be reused until
 
2255          * we are sure the s1120 card has copied
 
2256          * it to its memory. The FIT msg might have
 
2257          * contained several requests. As soon as
 
2258          * any of them are completed we know that
 
2259          * the entire FIT msg was transferred.
 
2260          * Only the first completed request will
 
2261          * match the FIT msg buffer id. The FIT
 
2262          * msg buffer id is immediately updated.
 
2263          * When subsequent requests complete the FIT
 
2264          * msg buffer id won't match, so we know
 
2265          * quite cheaply that it is already done.
 
2267         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
 
2268         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
 
2270         skmsg = &skdev->skmsg_table[msg_slot];
 
2271         if (skmsg->id == skreq->fitmsg_id) {
 
2272                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
 
2273                 SKD_ASSERT(skmsg->outstanding > 0);
 
2274                 skmsg->outstanding--;
 
2275                 if (skmsg->outstanding == 0) {
 
2276                         skmsg->state = SKD_MSG_STATE_IDLE;
 
2277                         skmsg->id += SKD_ID_INCR;
 
2278                         skmsg->next = skdev->skmsg_free_list;
 
2279                         skdev->skmsg_free_list = skmsg;
 
2284          * Decrease the number of active requests.
 
2285          * Also decrements the count in the timeout slot.
 
2287         SKD_ASSERT(skdev->in_flight > 0);
 
2288         skdev->in_flight -= 1;
 
2290         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 
2291         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
 
2292         skdev->timeout_slot[timo_slot] -= 1;
 
2300          * Reclaim the skd_request_context
 
2302         skreq->state = SKD_REQ_STATE_IDLE;
 
2303         skreq->id += SKD_ID_INCR;
 
2304         skreq->next = skdev->skreq_free_list;
 
2305         skdev->skreq_free_list = skreq;
 
2308 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
 
2310 static void skd_do_inq_page_00(struct skd_device *skdev,
 
2311                                struct fit_completion_entry_v1 *skcomp,
 
2312                                struct fit_comp_error_info *skerr,
 
2313                                uint8_t *cdb, uint8_t *buf)
 
2315         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
 
2317         /* Caller requested "supported pages".  The driver needs to insert
 
2320         dev_dbg(&skdev->pdev->dev,
 
2321                 "skd_do_driver_inquiry: modify supported pages.\n");
 
2323         /* If the device rejected the request because the CDB was
 
2324          * improperly formed, then just leave.
 
2326         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
 
2327             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
 
2330         /* Get the amount of space the caller allocated */
 
2331         max_bytes = (cdb[3] << 8) | cdb[4];
 
2333         /* Get the number of pages actually returned by the device */
 
2334         drive_pages = (buf[2] << 8) | buf[3];
 
2335         drive_bytes = drive_pages + 4;
 
2336         new_size = drive_pages + 1;
 
2338         /* Supported pages must be in numerical order, so find where
 
2339          * the driver page needs to be inserted into the list of
 
2340          * pages returned by the device.
 
2342         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
 
2343                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
 
2344                         return; /* Device using this page code. abort */
 
2345                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
 
2349         if (insert_pt < max_bytes) {
 
2352                 /* Shift everything up one byte to make room. */
 
2353                 for (u = new_size + 3; u > insert_pt; u--)
 
2354                         buf[u] = buf[u - 1];
 
2355                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
 
2357                 /* SCSI byte order increment of num_returned_bytes by 1 */
 
2358                 skcomp->num_returned_bytes =
 
2359                         cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
 
2362         /* update page length field to reflect the driver's page too */
 
2363         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
 
2364         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
 
2367 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
 
2373         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 
2376                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
 
2378                 pci_bus_speed = linksta & 0xF;
 
2379                 pci_lanes = (linksta & 0x3F0) >> 4;
 
2381                 *speed = STEC_LINK_UNKNOWN;
 
2386         switch (pci_bus_speed) {
 
2388                 *speed = STEC_LINK_2_5GTS;
 
2391                 *speed = STEC_LINK_5GTS;
 
2394                 *speed = STEC_LINK_8GTS;
 
2397                 *speed = STEC_LINK_UNKNOWN;
 
2401         if (pci_lanes <= 0x20)
 
2407 static void skd_do_inq_page_da(struct skd_device *skdev,
 
2408                                struct fit_completion_entry_v1 *skcomp,
 
2409                                struct fit_comp_error_info *skerr,
 
2410                                uint8_t *cdb, uint8_t *buf)
 
2412         struct pci_dev *pdev = skdev->pdev;
 
2414         struct driver_inquiry_data inq;
 
2417         dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
 
2419         memset(&inq, 0, sizeof(inq));
 
2421         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
 
2423         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
 
2424         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
 
2425         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
 
2426         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
 
2428         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
 
2429         inq.pcie_vendor_id = cpu_to_be16(val);
 
2431         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
 
2432         inq.pcie_device_id = cpu_to_be16(val);
 
2434         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
 
2435         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
 
2437         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
 
2438         inq.pcie_subsystem_device_id = cpu_to_be16(val);
 
2440         /* Driver version, fixed lenth, padded with spaces on the right */
 
2441         inq.driver_version_length = sizeof(inq.driver_version);
 
2442         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
 
2443         memcpy(inq.driver_version, DRV_VER_COMPL,
 
2444                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
 
2446         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
 
2448         /* Clear the error set by the device */
 
2449         skcomp->status = SAM_STAT_GOOD;
 
2450         memset((void *)skerr, 0, sizeof(*skerr));
 
2452         /* copy response into output buffer */
 
2453         max_bytes = (cdb[3] << 8) | cdb[4];
 
2454         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
 
2456         skcomp->num_returned_bytes =
 
2457                 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
 
2460 static void skd_do_driver_inq(struct skd_device *skdev,
 
2461                               struct fit_completion_entry_v1 *skcomp,
 
2462                               struct fit_comp_error_info *skerr,
 
2463                               uint8_t *cdb, uint8_t *buf)
 
2467         else if (cdb[0] != INQUIRY)
 
2468                 return;         /* Not an INQUIRY */
 
2469         else if ((cdb[1] & 1) == 0)
 
2470                 return;         /* EVPD not set */
 
2471         else if (cdb[2] == 0)
 
2472                 /* Need to add driver's page to supported pages list */
 
2473                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
 
2474         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
 
2475                 /* Caller requested driver's page */
 
2476                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
 
2479 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
 
2488 static void skd_process_scsi_inq(struct skd_device *skdev,
 
2489                                  struct fit_completion_entry_v1 *skcomp,
 
2490                                  struct fit_comp_error_info *skerr,
 
2491                                  struct skd_special_context *skspcl)
 
2494         struct skd_scsi_request *scsi_req = &skspcl->msg_buf->scsi[0];
 
2496         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
 
2497                             skspcl->req.data_dir);
 
2498         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
 
2501                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
 
2504 static int skd_isr_completion_posted(struct skd_device *skdev,
 
2505                                         int limit, int *enqueued)
 
2507         struct fit_completion_entry_v1 *skcmp;
 
2508         struct fit_comp_error_info *skerr;
 
2511         struct skd_request_context *skreq;
 
2519         lockdep_assert_held(&skdev->lock);
 
2522                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
 
2524                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
 
2525                 cmp_cycle = skcmp->cycle;
 
2526                 cmp_cntxt = skcmp->tag;
 
2527                 cmp_status = skcmp->status;
 
2528                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
 
2530                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
 
2532                 dev_dbg(&skdev->pdev->dev,
 
2533                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
 
2534                         skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
 
2535                         cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
 
2538                 if (cmp_cycle != skdev->skcomp_cycle) {
 
2539                         dev_dbg(&skdev->pdev->dev, "end of completions\n");
 
2543                  * Update the completion queue head index and possibly
 
2544                  * the completion cycle count. 8-bit wrap-around.
 
2547                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
 
2548                         skdev->skcomp_ix = 0;
 
2549                         skdev->skcomp_cycle++;
 
2553                  * The command context is a unique 32-bit ID. The low order
 
2554                  * bits help locate the request. The request is usually a
 
2555                  * r/w request (see skd_start() above) or a special request.
 
2558                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
 
2560                 /* Is this other than a r/w request? */
 
2561                 if (req_slot >= skdev->num_req_context) {
 
2563                          * This is not a completion for a r/w request.
 
2565                         skd_complete_other(skdev, skcmp, skerr);
 
2569                 skreq = &skdev->skreq_table[req_slot];
 
2572                  * Make sure the request ID for the slot matches.
 
2574                 if (skreq->id != req_id) {
 
2575                         dev_dbg(&skdev->pdev->dev,
 
2576                                 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
 
2579                                 u16 new_id = cmp_cntxt;
 
2580                                 dev_err(&skdev->pdev->dev,
 
2581                                         "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
 
2582                                         req_id, skreq->id, new_id);
 
2588                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
 
2590                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
 
2591                         dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
 
2593                         /* a previously timed out command can
 
2594                          * now be cleaned up */
 
2595                         skd_release_skreq(skdev, skreq);
 
2599                 skreq->completion = *skcmp;
 
2600                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
 
2601                         skreq->err_info = *skerr;
 
2602                         skd_log_check_status(skdev, cmp_status, skerr->key,
 
2603                                              skerr->code, skerr->qual,
 
2606                 /* Release DMA resources for the request. */
 
2607                 if (skreq->n_sg > 0)
 
2608                         skd_postop_sg_list(skdev, skreq);
 
2611                         dev_dbg(&skdev->pdev->dev,
 
2612                                 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
 
2613                                 skreq, skreq->id, req_id);
 
2616                          * Capture the outcome and post it back to the
 
2619                         if (likely(cmp_status == SAM_STAT_GOOD))
 
2620                                 skd_end_request(skdev, skreq, BLK_STS_OK);
 
2622                                 skd_resolve_req_exception(skdev, skreq);
 
2626                  * Release the skreq, its FIT msg (if one), timeout slot,
 
2629                 skd_release_skreq(skdev, skreq);
 
2631                 /* skd_isr_comp_limit equal zero means no limit */
 
2633                         if (++processed >= limit) {
 
2640         if ((skdev->state == SKD_DRVR_STATE_PAUSING)
 
2641                 && (skdev->in_flight) == 0) {
 
2642                 skdev->state = SKD_DRVR_STATE_PAUSED;
 
2643                 wake_up_interruptible(&skdev->waitq);
 
2649 static void skd_complete_other(struct skd_device *skdev,
 
2650                                struct fit_completion_entry_v1 *skcomp,
 
2651                                struct fit_comp_error_info *skerr)
 
2656         struct skd_special_context *skspcl;
 
2658         lockdep_assert_held(&skdev->lock);
 
2660         req_id = skcomp->tag;
 
2661         req_table = req_id & SKD_ID_TABLE_MASK;
 
2662         req_slot = req_id & SKD_ID_SLOT_MASK;
 
2664         dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
 
2668          * Based on the request id, determine how to dispatch this completion.
 
2669          * This swich/case is finding the good cases and forwarding the
 
2670          * completion entry. Errors are reported below the switch.
 
2672         switch (req_table) {
 
2673         case SKD_ID_RW_REQUEST:
 
2675                  * The caller, skd_isr_completion_posted() above,
 
2676                  * handles r/w requests. The only way we get here
 
2677                  * is if the req_slot is out of bounds.
 
2681         case SKD_ID_SPECIAL_REQUEST:
 
2683                  * Make sure the req_slot is in bounds and that the id
 
2686                 if (req_slot < skdev->n_special) {
 
2687                         skspcl = &skdev->skspcl_table[req_slot];
 
2688                         if (skspcl->req.id == req_id &&
 
2689                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
 
2690                                 skd_complete_special(skdev,
 
2691                                                      skcomp, skerr, skspcl);
 
2697         case SKD_ID_INTERNAL:
 
2698                 if (req_slot == 0) {
 
2699                         skspcl = &skdev->internal_skspcl;
 
2700                         if (skspcl->req.id == req_id &&
 
2701                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
 
2702                                 skd_complete_internal(skdev,
 
2703                                                       skcomp, skerr, skspcl);
 
2709         case SKD_ID_FIT_MSG:
 
2711                  * These id's should never appear in a completion record.
 
2717                  * These id's should never appear anywhere;
 
2723          * If we get here it is a bad or stale id.
 
2727 static void skd_complete_special(struct skd_device *skdev,
 
2728                                  struct fit_completion_entry_v1 *skcomp,
 
2729                                  struct fit_comp_error_info *skerr,
 
2730                                  struct skd_special_context *skspcl)
 
2732         lockdep_assert_held(&skdev->lock);
 
2734         dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
 
2735         if (skspcl->orphaned) {
 
2736                 /* Discard orphaned request */
 
2737                 /* ?: Can this release directly or does it need
 
2738                  * to use a worker? */
 
2739                 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
 
2740                 skd_release_special(skdev, skspcl);
 
2744         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
 
2746         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
 
2747         skspcl->req.completion = *skcomp;
 
2748         skspcl->req.err_info = *skerr;
 
2750         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
 
2751                              skerr->code, skerr->qual, skerr->fruc);
 
2753         wake_up_interruptible(&skdev->waitq);
 
2756 /* assume spinlock is already held */
 
2757 static void skd_release_special(struct skd_device *skdev,
 
2758                                 struct skd_special_context *skspcl)
 
2760         int i, was_depleted;
 
2762         for (i = 0; i < skspcl->req.n_sg; i++) {
 
2763                 struct page *page = sg_page(&skspcl->req.sg[i]);
 
2767         was_depleted = (skdev->skspcl_free_list == NULL);
 
2769         skspcl->req.state = SKD_REQ_STATE_IDLE;
 
2770         skspcl->req.id += SKD_ID_INCR;
 
2772                 (struct skd_request_context *)skdev->skspcl_free_list;
 
2773         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
 
2776                 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
 
2777                 /* Free list was depleted. Their might be waiters. */
 
2778                 wake_up_interruptible(&skdev->waitq);
 
2782 static void skd_reset_skcomp(struct skd_device *skdev)
 
2784         memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
 
2786         skdev->skcomp_ix = 0;
 
2787         skdev->skcomp_cycle = 1;
 
2791  *****************************************************************************
 
2793  *****************************************************************************
 
2795 static void skd_completion_worker(struct work_struct *work)
 
2797         struct skd_device *skdev =
 
2798                 container_of(work, struct skd_device, completion_worker);
 
2799         unsigned long flags;
 
2800         int flush_enqueued = 0;
 
2802         spin_lock_irqsave(&skdev->lock, flags);
 
2805          * pass in limit=0, which means no limit..
 
2806          * process everything in compq
 
2808         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
 
2809         skd_request_fn(skdev->queue);
 
2811         spin_unlock_irqrestore(&skdev->lock, flags);
 
2814 static void skd_isr_msg_from_dev(struct skd_device *skdev);
 
2817 skd_isr(int irq, void *ptr)
 
2819         struct skd_device *skdev = ptr;
 
2824         int flush_enqueued = 0;
 
2826         spin_lock(&skdev->lock);
 
2829                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
 
2831                 ack = FIT_INT_DEF_MASK;
 
2834                 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
 
2837                 /* As long as there is an int pending on device, keep
 
2838                  * running loop.  When none, get out, but if we've never
 
2839                  * done any processing, call completion handler?
 
2842                         /* No interrupts on device, but run the completion
 
2846                                 if (likely (skdev->state
 
2847                                         == SKD_DRVR_STATE_ONLINE))
 
2854                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
 
2856                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
 
2857                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
 
2858                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
 
2860                                  * If we have already deferred completion
 
2861                                  * processing, don't bother running it again
 
2865                                                 skd_isr_completion_posted(skdev,
 
2866                                                 skd_isr_comp_limit, &flush_enqueued);
 
2869                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
 
2870                                 skd_isr_fwstate(skdev);
 
2871                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
 
2873                                     SKD_DRVR_STATE_DISAPPEARED) {
 
2874                                         spin_unlock(&skdev->lock);
 
2879                         if (intstat & FIT_ISH_MSG_FROM_DEV)
 
2880                                 skd_isr_msg_from_dev(skdev);
 
2884         if (unlikely(flush_enqueued))
 
2885                 skd_request_fn(skdev->queue);
 
2888                 schedule_work(&skdev->completion_worker);
 
2889         else if (!flush_enqueued)
 
2890                 skd_request_fn(skdev->queue);
 
2892         spin_unlock(&skdev->lock);
 
2897 static void skd_drive_fault(struct skd_device *skdev)
 
2899         skdev->state = SKD_DRVR_STATE_FAULT;
 
2900         dev_err(&skdev->pdev->dev, "Drive FAULT\n");
 
2903 static void skd_drive_disappeared(struct skd_device *skdev)
 
2905         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
 
2906         dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
 
2909 static void skd_isr_fwstate(struct skd_device *skdev)
 
2914         int prev_driver_state = skdev->state;
 
2916         sense = SKD_READL(skdev, FIT_STATUS);
 
2917         state = sense & FIT_SR_DRIVE_STATE_MASK;
 
2919         dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
 
2920                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
 
2921                 skd_drive_state_to_str(state), state);
 
2923         skdev->drive_state = state;
 
2925         switch (skdev->drive_state) {
 
2926         case FIT_SR_DRIVE_INIT:
 
2927                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
 
2928                         skd_disable_interrupts(skdev);
 
2931                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
 
2932                         skd_recover_requests(skdev);
 
2933                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
 
2934                         skdev->timer_countdown = SKD_STARTING_TIMO;
 
2935                         skdev->state = SKD_DRVR_STATE_STARTING;
 
2936                         skd_soft_reset(skdev);
 
2939                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
 
2940                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
2941                 skdev->last_mtd = mtd;
 
2944         case FIT_SR_DRIVE_ONLINE:
 
2945                 skdev->cur_max_queue_depth = skd_max_queue_depth;
 
2946                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
 
2947                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
 
2949                 skdev->queue_low_water_mark =
 
2950                         skdev->cur_max_queue_depth * 2 / 3 + 1;
 
2951                 if (skdev->queue_low_water_mark < 1)
 
2952                         skdev->queue_low_water_mark = 1;
 
2953                 dev_info(&skdev->pdev->dev,
 
2954                          "Queue depth limit=%d dev=%d lowat=%d\n",
 
2955                          skdev->cur_max_queue_depth,
 
2956                          skdev->dev_max_queue_depth,
 
2957                          skdev->queue_low_water_mark);
 
2959                 skd_refresh_device_data(skdev);
 
2962         case FIT_SR_DRIVE_BUSY:
 
2963                 skdev->state = SKD_DRVR_STATE_BUSY;
 
2964                 skdev->timer_countdown = SKD_BUSY_TIMO;
 
2965                 skd_quiesce_dev(skdev);
 
2967         case FIT_SR_DRIVE_BUSY_SANITIZE:
 
2968                 /* set timer for 3 seconds, we'll abort any unfinished
 
2969                  * commands after that expires
 
2971                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
 
2972                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
 
2973                 blk_start_queue(skdev->queue);
 
2975         case FIT_SR_DRIVE_BUSY_ERASE:
 
2976                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
 
2977                 skdev->timer_countdown = SKD_BUSY_TIMO;
 
2979         case FIT_SR_DRIVE_OFFLINE:
 
2980                 skdev->state = SKD_DRVR_STATE_IDLE;
 
2982         case FIT_SR_DRIVE_SOFT_RESET:
 
2983                 switch (skdev->state) {
 
2984                 case SKD_DRVR_STATE_STARTING:
 
2985                 case SKD_DRVR_STATE_RESTARTING:
 
2986                         /* Expected by a caller of skd_soft_reset() */
 
2989                         skdev->state = SKD_DRVR_STATE_RESTARTING;
 
2993         case FIT_SR_DRIVE_FW_BOOTING:
 
2994                 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
 
2995                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
 
2996                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
 
2999         case FIT_SR_DRIVE_DEGRADED:
 
3000         case FIT_SR_PCIE_LINK_DOWN:
 
3001         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
 
3004         case FIT_SR_DRIVE_FAULT:
 
3005                 skd_drive_fault(skdev);
 
3006                 skd_recover_requests(skdev);
 
3007                 blk_start_queue(skdev->queue);
 
3010         /* PCIe bus returned all Fs? */
 
3012                 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
 
3014                 skd_drive_disappeared(skdev);
 
3015                 skd_recover_requests(skdev);
 
3016                 blk_start_queue(skdev->queue);
 
3020                  * Uknown FW State. Wait for a state we recognize.
 
3024         dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
 
3025                 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
 
3026                 skd_skdev_state_to_str(skdev->state), skdev->state);
 
3029 static void skd_recover_requests(struct skd_device *skdev)
 
3033         for (i = 0; i < skdev->num_req_context; i++) {
 
3034                 struct skd_request_context *skreq = &skdev->skreq_table[i];
 
3036                 if (skreq->state == SKD_REQ_STATE_BUSY) {
 
3037                         skd_log_skreq(skdev, skreq, "recover");
 
3039                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
 
3040                         SKD_ASSERT(skreq->req != NULL);
 
3042                         /* Release DMA resources for the request. */
 
3043                         if (skreq->n_sg > 0)
 
3044                                 skd_postop_sg_list(skdev, skreq);
 
3046                         skd_end_request(skdev, skreq, BLK_STS_IOERR);
 
3050                         skreq->state = SKD_REQ_STATE_IDLE;
 
3051                         skreq->id += SKD_ID_INCR;
 
3054                         skreq[-1].next = skreq;
 
3057         skdev->skreq_free_list = skdev->skreq_table;
 
3059         for (i = 0; i < skdev->num_fitmsg_context; i++) {
 
3060                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
 
3062                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
 
3063                         skd_log_skmsg(skdev, skmsg, "salvaged");
 
3064                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
 
3065                         skmsg->state = SKD_MSG_STATE_IDLE;
 
3066                         skmsg->id += SKD_ID_INCR;
 
3069                         skmsg[-1].next = skmsg;
 
3072         skdev->skmsg_free_list = skdev->skmsg_table;
 
3074         for (i = 0; i < skdev->n_special; i++) {
 
3075                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
 
3077                 /* If orphaned, reclaim it because it has already been reported
 
3078                  * to the process as an error (it was just waiting for
 
3079                  * a completion that didn't come, and now it will never come)
 
3080                  * If busy, change to a state that will cause it to error
 
3081                  * out in the wait routine and let it do the normal
 
3082                  * reporting and reclaiming
 
3084                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
 
3085                         if (skspcl->orphaned) {
 
3086                                 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
 
3088                                 skd_release_special(skdev, skspcl);
 
3090                                 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
 
3092                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
 
3096         skdev->skspcl_free_list = skdev->skspcl_table;
 
3098         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
 
3099                 skdev->timeout_slot[i] = 0;
 
3101         skdev->in_flight = 0;
 
3104 static void skd_isr_msg_from_dev(struct skd_device *skdev)
 
3110         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
 
3112         dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
 
3115         /* ignore any mtd that is an ack for something we didn't send */
 
3116         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
 
3119         switch (FIT_MXD_TYPE(mfd)) {
 
3120         case FIT_MTD_FITFW_INIT:
 
3121                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
 
3123                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
 
3124                         dev_err(&skdev->pdev->dev, "protocol mismatch\n");
 
3125                         dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
 
3126                                 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
 
3127                         dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
 
3128                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
 
3129                         skd_soft_reset(skdev);
 
3132                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
 
3133                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3134                 skdev->last_mtd = mtd;
 
3137         case FIT_MTD_GET_CMDQ_DEPTH:
 
3138                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
 
3139                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
 
3140                                    SKD_N_COMPLETION_ENTRY);
 
3141                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3142                 skdev->last_mtd = mtd;
 
3145         case FIT_MTD_SET_COMPQ_DEPTH:
 
3146                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
 
3147                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
 
3148                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3149                 skdev->last_mtd = mtd;
 
3152         case FIT_MTD_SET_COMPQ_ADDR:
 
3153                 skd_reset_skcomp(skdev);
 
3154                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
 
3155                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3156                 skdev->last_mtd = mtd;
 
3159         case FIT_MTD_CMD_LOG_HOST_ID:
 
3160                 skdev->connect_time_stamp = get_seconds();
 
3161                 data = skdev->connect_time_stamp & 0xFFFF;
 
3162                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
 
3163                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3164                 skdev->last_mtd = mtd;
 
3167         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
 
3168                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
 
3169                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
 
3170                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
 
3171                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3172                 skdev->last_mtd = mtd;
 
3175         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
 
3176                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
 
3177                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
 
3178                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
 
3179                 skdev->last_mtd = mtd;
 
3181                 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
 
3182                         skdev->connect_time_stamp, skdev->drive_jiffies);
 
3185         case FIT_MTD_ARM_QUEUE:
 
3186                 skdev->last_mtd = 0;
 
3188                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
 
3197 static void skd_disable_interrupts(struct skd_device *skdev)
 
3201         sense = SKD_READL(skdev, FIT_CONTROL);
 
3202         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
 
3203         SKD_WRITEL(skdev, sense, FIT_CONTROL);
 
3204         dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
 
3206         /* Note that the 1s is written. A 1-bit means
 
3207          * disable, a 0 means enable.
 
3209         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
 
3212 static void skd_enable_interrupts(struct skd_device *skdev)
 
3216         /* unmask interrupts first */
 
3217         val = FIT_ISH_FW_STATE_CHANGE +
 
3218               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
 
3220         /* Note that the compliment of mask is written. A 1-bit means
 
3221          * disable, a 0 means enable. */
 
3222         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
 
3223         dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
 
3225         val = SKD_READL(skdev, FIT_CONTROL);
 
3226         val |= FIT_CR_ENABLE_INTERRUPTS;
 
3227         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
 
3228         SKD_WRITEL(skdev, val, FIT_CONTROL);
 
3232  *****************************************************************************
 
3233  * START, STOP, RESTART, QUIESCE, UNQUIESCE
 
3234  *****************************************************************************
 
3237 static void skd_soft_reset(struct skd_device *skdev)
 
3241         val = SKD_READL(skdev, FIT_CONTROL);
 
3242         val |= (FIT_CR_SOFT_RESET);
 
3243         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
 
3244         SKD_WRITEL(skdev, val, FIT_CONTROL);
 
3247 static void skd_start_device(struct skd_device *skdev)
 
3249         unsigned long flags;
 
3253         spin_lock_irqsave(&skdev->lock, flags);
 
3255         /* ack all ghost interrupts */
 
3256         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
 
3258         sense = SKD_READL(skdev, FIT_STATUS);
 
3260         dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
 
3262         state = sense & FIT_SR_DRIVE_STATE_MASK;
 
3263         skdev->drive_state = state;
 
3264         skdev->last_mtd = 0;
 
3266         skdev->state = SKD_DRVR_STATE_STARTING;
 
3267         skdev->timer_countdown = SKD_STARTING_TIMO;
 
3269         skd_enable_interrupts(skdev);
 
3271         switch (skdev->drive_state) {
 
3272         case FIT_SR_DRIVE_OFFLINE:
 
3273                 dev_err(&skdev->pdev->dev, "Drive offline...\n");
 
3276         case FIT_SR_DRIVE_FW_BOOTING:
 
3277                 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
 
3278                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
 
3279                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
 
3282         case FIT_SR_DRIVE_BUSY_SANITIZE:
 
3283                 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
 
3284                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
 
3285                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
 
3288         case FIT_SR_DRIVE_BUSY_ERASE:
 
3289                 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
 
3290                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
 
3291                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
 
3294         case FIT_SR_DRIVE_INIT:
 
3295         case FIT_SR_DRIVE_ONLINE:
 
3296                 skd_soft_reset(skdev);
 
3299         case FIT_SR_DRIVE_BUSY:
 
3300                 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
 
3301                 skdev->state = SKD_DRVR_STATE_BUSY;
 
3302                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
 
3305         case FIT_SR_DRIVE_SOFT_RESET:
 
3306                 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
 
3309         case FIT_SR_DRIVE_FAULT:
 
3310                 /* Fault state is bad...soft reset won't do it...
 
3311                  * Hard reset, maybe, but does it work on device?
 
3312                  * For now, just fault so the system doesn't hang.
 
3314                 skd_drive_fault(skdev);
 
3315                 /*start the queue so we can respond with error to requests */
 
3316                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
 
3317                 blk_start_queue(skdev->queue);
 
3318                 skdev->gendisk_on = -1;
 
3319                 wake_up_interruptible(&skdev->waitq);
 
3323                 /* Most likely the device isn't there or isn't responding
 
3324                  * to the BAR1 addresses. */
 
3325                 skd_drive_disappeared(skdev);
 
3326                 /*start the queue so we can respond with error to requests */
 
3327                 dev_dbg(&skdev->pdev->dev,
 
3328                         "starting queue to error-out reqs\n");
 
3329                 blk_start_queue(skdev->queue);
 
3330                 skdev->gendisk_on = -1;
 
3331                 wake_up_interruptible(&skdev->waitq);
 
3335                 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
 
3336                         skdev->drive_state);
 
3340         state = SKD_READL(skdev, FIT_CONTROL);
 
3341         dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
 
3343         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
 
3344         dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
 
3346         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
 
3347         dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
 
3349         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
 
3350         dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
 
3352         state = SKD_READL(skdev, FIT_HW_VERSION);
 
3353         dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
 
3355         spin_unlock_irqrestore(&skdev->lock, flags);
 
3358 static void skd_stop_device(struct skd_device *skdev)
 
3360         unsigned long flags;
 
3361         struct skd_special_context *skspcl = &skdev->internal_skspcl;
 
3365         spin_lock_irqsave(&skdev->lock, flags);
 
3367         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
 
3368                 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
 
3372         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
 
3373                 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
 
3377         skdev->state = SKD_DRVR_STATE_SYNCING;
 
3378         skdev->sync_done = 0;
 
3380         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
 
3382         spin_unlock_irqrestore(&skdev->lock, flags);
 
3384         wait_event_interruptible_timeout(skdev->waitq,
 
3385                                          (skdev->sync_done), (10 * HZ));
 
3387         spin_lock_irqsave(&skdev->lock, flags);
 
3389         switch (skdev->sync_done) {
 
3391                 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
 
3394                 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
 
3397                 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
 
3401         skdev->state = SKD_DRVR_STATE_STOPPING;
 
3402         spin_unlock_irqrestore(&skdev->lock, flags);
 
3404         skd_kill_timer(skdev);
 
3406         spin_lock_irqsave(&skdev->lock, flags);
 
3407         skd_disable_interrupts(skdev);
 
3409         /* ensure all ints on device are cleared */
 
3410         /* soft reset the device to unload with a clean slate */
 
3411         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
 
3412         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
 
3414         spin_unlock_irqrestore(&skdev->lock, flags);
 
3416         /* poll every 100ms, 1 second timeout */
 
3417         for (i = 0; i < 10; i++) {
 
3419                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
 
3420                 if (dev_state == FIT_SR_DRIVE_INIT)
 
3422                 set_current_state(TASK_INTERRUPTIBLE);
 
3423                 schedule_timeout(msecs_to_jiffies(100));
 
3426         if (dev_state != FIT_SR_DRIVE_INIT)
 
3427                 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
 
3431 /* assume spinlock is held */
 
3432 static void skd_restart_device(struct skd_device *skdev)
 
3436         /* ack all ghost interrupts */
 
3437         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
 
3439         state = SKD_READL(skdev, FIT_STATUS);
 
3441         dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
 
3443         state &= FIT_SR_DRIVE_STATE_MASK;
 
3444         skdev->drive_state = state;
 
3445         skdev->last_mtd = 0;
 
3447         skdev->state = SKD_DRVR_STATE_RESTARTING;
 
3448         skdev->timer_countdown = SKD_RESTARTING_TIMO;
 
3450         skd_soft_reset(skdev);
 
3453 /* assume spinlock is held */
 
3454 static int skd_quiesce_dev(struct skd_device *skdev)
 
3458         switch (skdev->state) {
 
3459         case SKD_DRVR_STATE_BUSY:
 
3460         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
3461                 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
 
3462                 blk_stop_queue(skdev->queue);
 
3464         case SKD_DRVR_STATE_ONLINE:
 
3465         case SKD_DRVR_STATE_STOPPING:
 
3466         case SKD_DRVR_STATE_SYNCING:
 
3467         case SKD_DRVR_STATE_PAUSING:
 
3468         case SKD_DRVR_STATE_PAUSED:
 
3469         case SKD_DRVR_STATE_STARTING:
 
3470         case SKD_DRVR_STATE_RESTARTING:
 
3471         case SKD_DRVR_STATE_RESUMING:
 
3474                 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
 
3480 /* assume spinlock is held */
 
3481 static int skd_unquiesce_dev(struct skd_device *skdev)
 
3483         int prev_driver_state = skdev->state;
 
3485         skd_log_skdev(skdev, "unquiesce");
 
3486         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
 
3487                 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
 
3490         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
 
3492                  * If there has been an state change to other than
 
3493                  * ONLINE, we will rely on controller state change
 
3494                  * to come back online and restart the queue.
 
3495                  * The BUSY state means that driver is ready to
 
3496                  * continue normal processing but waiting for controller
 
3497                  * to become available.
 
3499                 skdev->state = SKD_DRVR_STATE_BUSY;
 
3500                 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
 
3505          * Drive has just come online, driver is either in startup,
 
3506          * paused performing a task, or bust waiting for hardware.
 
3508         switch (skdev->state) {
 
3509         case SKD_DRVR_STATE_PAUSED:
 
3510         case SKD_DRVR_STATE_BUSY:
 
3511         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
3512         case SKD_DRVR_STATE_BUSY_ERASE:
 
3513         case SKD_DRVR_STATE_STARTING:
 
3514         case SKD_DRVR_STATE_RESTARTING:
 
3515         case SKD_DRVR_STATE_FAULT:
 
3516         case SKD_DRVR_STATE_IDLE:
 
3517         case SKD_DRVR_STATE_LOAD:
 
3518                 skdev->state = SKD_DRVR_STATE_ONLINE;
 
3519                 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
 
3520                         skd_skdev_state_to_str(prev_driver_state),
 
3521                         prev_driver_state, skd_skdev_state_to_str(skdev->state),
 
3523                 dev_dbg(&skdev->pdev->dev,
 
3524                         "**** device ONLINE...starting block queue\n");
 
3525                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
 
3526                 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
 
3527                 blk_start_queue(skdev->queue);
 
3528                 skdev->gendisk_on = 1;
 
3529                 wake_up_interruptible(&skdev->waitq);
 
3532         case SKD_DRVR_STATE_DISAPPEARED:
 
3534                 dev_dbg(&skdev->pdev->dev,
 
3535                         "**** driver state %d, not implemented\n",
 
3543  *****************************************************************************
 
3544  * PCIe MSI/MSI-X INTERRUPT HANDLERS
 
3545  *****************************************************************************
 
3548 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
 
3550         struct skd_device *skdev = skd_host_data;
 
3551         unsigned long flags;
 
3553         spin_lock_irqsave(&skdev->lock, flags);
 
3554         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
 
3555                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3556         dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
 
3557                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3558         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
 
3559         spin_unlock_irqrestore(&skdev->lock, flags);
 
3563 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
 
3565         struct skd_device *skdev = skd_host_data;
 
3566         unsigned long flags;
 
3568         spin_lock_irqsave(&skdev->lock, flags);
 
3569         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
 
3570                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3571         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
 
3572         skd_isr_fwstate(skdev);
 
3573         spin_unlock_irqrestore(&skdev->lock, flags);
 
3577 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
 
3579         struct skd_device *skdev = skd_host_data;
 
3580         unsigned long flags;
 
3581         int flush_enqueued = 0;
 
3584         spin_lock_irqsave(&skdev->lock, flags);
 
3585         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
 
3586                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3587         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
 
3588         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
 
3591                 skd_request_fn(skdev->queue);
 
3594                 schedule_work(&skdev->completion_worker);
 
3595         else if (!flush_enqueued)
 
3596                 skd_request_fn(skdev->queue);
 
3598         spin_unlock_irqrestore(&skdev->lock, flags);
 
3603 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
 
3605         struct skd_device *skdev = skd_host_data;
 
3606         unsigned long flags;
 
3608         spin_lock_irqsave(&skdev->lock, flags);
 
3609         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
 
3610                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3611         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
 
3612         skd_isr_msg_from_dev(skdev);
 
3613         spin_unlock_irqrestore(&skdev->lock, flags);
 
3617 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
 
3619         struct skd_device *skdev = skd_host_data;
 
3620         unsigned long flags;
 
3622         spin_lock_irqsave(&skdev->lock, flags);
 
3623         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
 
3624                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
 
3625         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
 
3626         spin_unlock_irqrestore(&skdev->lock, flags);
 
3631  *****************************************************************************
 
3632  * PCIe MSI/MSI-X SETUP
 
3633  *****************************************************************************
 
3636 struct skd_msix_entry {
 
3640 struct skd_init_msix_entry {
 
3642         irq_handler_t handler;
 
3645 #define SKD_MAX_MSIX_COUNT              13
 
3646 #define SKD_MIN_MSIX_COUNT              7
 
3647 #define SKD_BASE_MSIX_IRQ               4
 
3649 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
 
3650         { "(DMA 0)",        skd_reserved_isr },
 
3651         { "(DMA 1)",        skd_reserved_isr },
 
3652         { "(DMA 2)",        skd_reserved_isr },
 
3653         { "(DMA 3)",        skd_reserved_isr },
 
3654         { "(State Change)", skd_statec_isr   },
 
3655         { "(COMPL_Q)",      skd_comp_q       },
 
3656         { "(MSG)",          skd_msg_isr      },
 
3657         { "(Reserved)",     skd_reserved_isr },
 
3658         { "(Reserved)",     skd_reserved_isr },
 
3659         { "(Queue Full 0)", skd_qfull_isr    },
 
3660         { "(Queue Full 1)", skd_qfull_isr    },
 
3661         { "(Queue Full 2)", skd_qfull_isr    },
 
3662         { "(Queue Full 3)", skd_qfull_isr    },
 
3665 static int skd_acquire_msix(struct skd_device *skdev)
 
3668         struct pci_dev *pdev = skdev->pdev;
 
3670         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
 
3673                 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
 
3677         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
 
3678                         sizeof(struct skd_msix_entry), GFP_KERNEL);
 
3679         if (!skdev->msix_entries) {
 
3681                 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
 
3685         /* Enable MSI-X vectors for the base queue */
 
3686         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
 
3687                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
 
3689                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
 
3690                          "%s%d-msix %s", DRV_NAME, skdev->devno,
 
3691                          msix_entries[i].name);
 
3693                 rc = devm_request_irq(&skdev->pdev->dev,
 
3694                                 pci_irq_vector(skdev->pdev, i),
 
3695                                 msix_entries[i].handler, 0,
 
3696                                 qentry->isr_name, skdev);
 
3698                         dev_err(&skdev->pdev->dev,
 
3699                                 "Unable to register(%d) MSI-X handler %d: %s\n",
 
3700                                 rc, i, qentry->isr_name);
 
3705         dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
 
3706                 SKD_MAX_MSIX_COUNT);
 
3711                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
 
3713         kfree(skdev->msix_entries);
 
3714         skdev->msix_entries = NULL;
 
3718 static int skd_acquire_irq(struct skd_device *skdev)
 
3720         struct pci_dev *pdev = skdev->pdev;
 
3721         unsigned int irq_flag = PCI_IRQ_LEGACY;
 
3724         if (skd_isr_type == SKD_IRQ_MSIX) {
 
3725                 rc = skd_acquire_msix(skdev);
 
3729                 dev_err(&skdev->pdev->dev,
 
3730                         "failed to enable MSI-X, re-trying with MSI %d\n", rc);
 
3733         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
 
3736         if (skd_isr_type != SKD_IRQ_LEGACY)
 
3737                 irq_flag |= PCI_IRQ_MSI;
 
3738         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
 
3740                 dev_err(&skdev->pdev->dev,
 
3741                         "failed to allocate the MSI interrupt %d\n", rc);
 
3745         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
 
3746                         pdev->msi_enabled ? 0 : IRQF_SHARED,
 
3747                         skdev->isr_name, skdev);
 
3749                 pci_free_irq_vectors(pdev);
 
3750                 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
 
3758 static void skd_release_irq(struct skd_device *skdev)
 
3760         struct pci_dev *pdev = skdev->pdev;
 
3762         if (skdev->msix_entries) {
 
3765                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
 
3766                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
 
3770                 kfree(skdev->msix_entries);
 
3771                 skdev->msix_entries = NULL;
 
3773                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
 
3776         pci_free_irq_vectors(pdev);
 
3780  *****************************************************************************
 
3782  *****************************************************************************
 
3785 static int skd_cons_skcomp(struct skd_device *skdev)
 
3788         struct fit_completion_entry_v1 *skcomp;
 
3790         dev_dbg(&skdev->pdev->dev,
 
3791                 "comp pci_alloc, total bytes %zd entries %d\n",
 
3792                 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
 
3794         skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
 
3795                                        &skdev->cq_dma_address);
 
3797         if (skcomp == NULL) {
 
3802         skdev->skcomp_table = skcomp;
 
3803         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
 
3805                                                            SKD_N_COMPLETION_ENTRY);
 
3811 static int skd_cons_skmsg(struct skd_device *skdev)
 
3816         dev_dbg(&skdev->pdev->dev,
 
3817                 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
 
3818                 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
 
3819                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
 
3821         skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
 
3822                                      sizeof(struct skd_fitmsg_context),
 
3824         if (skdev->skmsg_table == NULL) {
 
3829         for (i = 0; i < skdev->num_fitmsg_context; i++) {
 
3830                 struct skd_fitmsg_context *skmsg;
 
3832                 skmsg = &skdev->skmsg_table[i];
 
3834                 skmsg->id = i + SKD_ID_FIT_MSG;
 
3836                 skmsg->state = SKD_MSG_STATE_IDLE;
 
3837                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
 
3839                                                       &skmsg->mb_dma_address);
 
3841                 if (skmsg->msg_buf == NULL) {
 
3846                 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
 
3847                      (FIT_QCMD_ALIGN - 1),
 
3848                      "not aligned: msg_buf %p mb_dma_address %#llx\n",
 
3849                      skmsg->msg_buf, skmsg->mb_dma_address);
 
3850                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
 
3852                 skmsg->next = &skmsg[1];
 
3855         /* Free list is in order starting with the 0th entry. */
 
3856         skdev->skmsg_table[i - 1].next = NULL;
 
3857         skdev->skmsg_free_list = skdev->skmsg_table;
 
3863 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
 
3865                                                   dma_addr_t *ret_dma_addr)
 
3867         struct fit_sg_descriptor *sg_list;
 
3870         nbytes = sizeof(*sg_list) * n_sg;
 
3872         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
 
3874         if (sg_list != NULL) {
 
3875                 uint64_t dma_address = *ret_dma_addr;
 
3878                 memset(sg_list, 0, nbytes);
 
3880                 for (i = 0; i < n_sg - 1; i++) {
 
3882                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
 
3884                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
 
3886                 sg_list[i].next_desc_ptr = 0LL;
 
3892 static int skd_cons_skreq(struct skd_device *skdev)
 
3897         dev_dbg(&skdev->pdev->dev,
 
3898                 "skreq_table kcalloc, struct %lu, count %u total %lu\n",
 
3899                 sizeof(struct skd_request_context), skdev->num_req_context,
 
3900                 sizeof(struct skd_request_context) * skdev->num_req_context);
 
3902         skdev->skreq_table = kcalloc(skdev->num_req_context,
 
3903                                      sizeof(struct skd_request_context),
 
3905         if (skdev->skreq_table == NULL) {
 
3910         dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
 
3911                 skdev->sgs_per_request, sizeof(struct scatterlist),
 
3912                 skdev->sgs_per_request * sizeof(struct scatterlist));
 
3914         for (i = 0; i < skdev->num_req_context; i++) {
 
3915                 struct skd_request_context *skreq;
 
3917                 skreq = &skdev->skreq_table[i];
 
3919                 skreq->id = i + SKD_ID_RW_REQUEST;
 
3920                 skreq->state = SKD_REQ_STATE_IDLE;
 
3922                 skreq->sg = kcalloc(skdev->sgs_per_request,
 
3923                                     sizeof(struct scatterlist), GFP_KERNEL);
 
3924                 if (skreq->sg == NULL) {
 
3928                 sg_init_table(skreq->sg, skdev->sgs_per_request);
 
3930                 skreq->sksg_list = skd_cons_sg_list(skdev,
 
3931                                                     skdev->sgs_per_request,
 
3932                                                     &skreq->sksg_dma_address);
 
3934                 if (skreq->sksg_list == NULL) {
 
3939                 skreq->next = &skreq[1];
 
3942         /* Free list is in order starting with the 0th entry. */
 
3943         skdev->skreq_table[i - 1].next = NULL;
 
3944         skdev->skreq_free_list = skdev->skreq_table;
 
3950 static int skd_cons_skspcl(struct skd_device *skdev)
 
3955         dev_dbg(&skdev->pdev->dev,
 
3956                 "skspcl_table kcalloc, struct %lu, count %u total %lu\n",
 
3957                 sizeof(struct skd_special_context), skdev->n_special,
 
3958                 sizeof(struct skd_special_context) * skdev->n_special);
 
3960         skdev->skspcl_table = kcalloc(skdev->n_special,
 
3961                                       sizeof(struct skd_special_context),
 
3963         if (skdev->skspcl_table == NULL) {
 
3968         for (i = 0; i < skdev->n_special; i++) {
 
3969                 struct skd_special_context *skspcl;
 
3971                 skspcl = &skdev->skspcl_table[i];
 
3973                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
 
3974                 skspcl->req.state = SKD_REQ_STATE_IDLE;
 
3976                 skspcl->req.next = &skspcl[1].req;
 
3978                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
 
3981                         pci_zalloc_consistent(skdev->pdev, nbytes,
 
3982                                               &skspcl->mb_dma_address);
 
3983                 if (skspcl->msg_buf == NULL) {
 
3988                 skspcl->req.sg = kcalloc(SKD_N_SG_PER_SPECIAL,
 
3989                                          sizeof(struct scatterlist),
 
3991                 if (skspcl->req.sg == NULL) {
 
3996                 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
 
3997                                                          SKD_N_SG_PER_SPECIAL,
 
4000                 if (skspcl->req.sksg_list == NULL) {
 
4006         /* Free list is in order starting with the 0th entry. */
 
4007         skdev->skspcl_table[i - 1].req.next = NULL;
 
4008         skdev->skspcl_free_list = skdev->skspcl_table;
 
4016 static int skd_cons_sksb(struct skd_device *skdev)
 
4019         struct skd_special_context *skspcl;
 
4022         skspcl = &skdev->internal_skspcl;
 
4024         skspcl->req.id = 0 + SKD_ID_INTERNAL;
 
4025         skspcl->req.state = SKD_REQ_STATE_IDLE;
 
4027         nbytes = SKD_N_INTERNAL_BYTES;
 
4029         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
 
4030                                                  &skspcl->db_dma_address);
 
4031         if (skspcl->data_buf == NULL) {
 
4036         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
 
4037         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
 
4038                                                 &skspcl->mb_dma_address);
 
4039         if (skspcl->msg_buf == NULL) {
 
4044         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
 
4045                                                  &skspcl->req.sksg_dma_address);
 
4046         if (skspcl->req.sksg_list == NULL) {
 
4051         if (!skd_format_internal_skspcl(skdev)) {
 
4060 static int skd_cons_disk(struct skd_device *skdev)
 
4063         struct gendisk *disk;
 
4064         struct request_queue *q;
 
4065         unsigned long flags;
 
4067         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
 
4074         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
 
4076         disk->major = skdev->major;
 
4077         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
 
4078         disk->fops = &skd_blockdev_ops;
 
4079         disk->private_data = skdev;
 
4081         q = blk_init_queue(skd_request_fn, &skdev->lock);
 
4086         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 
4090         q->queuedata = skdev;
 
4092         blk_queue_write_cache(q, true, true);
 
4093         blk_queue_max_segments(q, skdev->sgs_per_request);
 
4094         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
 
4096         /* set optimal I/O size to 8KB */
 
4097         blk_queue_io_opt(q, 8192);
 
4099         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 
4100         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
4102         spin_lock_irqsave(&skdev->lock, flags);
 
4103         dev_dbg(&skdev->pdev->dev, "stopping queue\n");
 
4104         blk_stop_queue(skdev->queue);
 
4105         spin_unlock_irqrestore(&skdev->lock, flags);
 
4111 #define SKD_N_DEV_TABLE         16u
 
4112 static u32 skd_next_devno;
 
4114 static struct skd_device *skd_construct(struct pci_dev *pdev)
 
4116         struct skd_device *skdev;
 
4117         int blk_major = skd_major;
 
4120         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
 
4123                 dev_err(&pdev->dev, "memory alloc failure\n");
 
4127         skdev->state = SKD_DRVR_STATE_LOAD;
 
4129         skdev->devno = skd_next_devno++;
 
4130         skdev->major = blk_major;
 
4131         skdev->dev_max_queue_depth = 0;
 
4133         skdev->num_req_context = skd_max_queue_depth;
 
4134         skdev->num_fitmsg_context = skd_max_queue_depth;
 
4135         skdev->n_special = skd_max_pass_thru;
 
4136         skdev->cur_max_queue_depth = 1;
 
4137         skdev->queue_low_water_mark = 1;
 
4138         skdev->proto_ver = 99;
 
4139         skdev->sgs_per_request = skd_sgs_per_request;
 
4140         skdev->dbg_level = skd_dbg_level;
 
4142         spin_lock_init(&skdev->lock);
 
4144         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
 
4146         dev_dbg(&skdev->pdev->dev, "skcomp\n");
 
4147         rc = skd_cons_skcomp(skdev);
 
4151         dev_dbg(&skdev->pdev->dev, "skmsg\n");
 
4152         rc = skd_cons_skmsg(skdev);
 
4156         dev_dbg(&skdev->pdev->dev, "skreq\n");
 
4157         rc = skd_cons_skreq(skdev);
 
4161         dev_dbg(&skdev->pdev->dev, "skspcl\n");
 
4162         rc = skd_cons_skspcl(skdev);
 
4166         dev_dbg(&skdev->pdev->dev, "sksb\n");
 
4167         rc = skd_cons_sksb(skdev);
 
4171         dev_dbg(&skdev->pdev->dev, "disk\n");
 
4172         rc = skd_cons_disk(skdev);
 
4176         dev_dbg(&skdev->pdev->dev, "VICTORY\n");
 
4180         dev_dbg(&skdev->pdev->dev, "construct failed\n");
 
4181         skd_destruct(skdev);
 
4186  *****************************************************************************
 
4188  *****************************************************************************
 
4191 static void skd_free_skcomp(struct skd_device *skdev)
 
4193         if (skdev->skcomp_table)
 
4194                 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
 
4195                                     skdev->skcomp_table, skdev->cq_dma_address);
 
4197         skdev->skcomp_table = NULL;
 
4198         skdev->cq_dma_address = 0;
 
4201 static void skd_free_skmsg(struct skd_device *skdev)
 
4205         if (skdev->skmsg_table == NULL)
 
4208         for (i = 0; i < skdev->num_fitmsg_context; i++) {
 
4209                 struct skd_fitmsg_context *skmsg;
 
4211                 skmsg = &skdev->skmsg_table[i];
 
4213                 if (skmsg->msg_buf != NULL) {
 
4214                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
 
4216                                             skmsg->mb_dma_address);
 
4218                 skmsg->msg_buf = NULL;
 
4219                 skmsg->mb_dma_address = 0;
 
4222         kfree(skdev->skmsg_table);
 
4223         skdev->skmsg_table = NULL;
 
4226 static void skd_free_sg_list(struct skd_device *skdev,
 
4227                              struct fit_sg_descriptor *sg_list,
 
4228                              u32 n_sg, dma_addr_t dma_addr)
 
4230         if (sg_list != NULL) {
 
4233                 nbytes = sizeof(*sg_list) * n_sg;
 
4235                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
 
4239 static void skd_free_skreq(struct skd_device *skdev)
 
4243         if (skdev->skreq_table == NULL)
 
4246         for (i = 0; i < skdev->num_req_context; i++) {
 
4247                 struct skd_request_context *skreq;
 
4249                 skreq = &skdev->skreq_table[i];
 
4251                 skd_free_sg_list(skdev, skreq->sksg_list,
 
4252                                  skdev->sgs_per_request,
 
4253                                  skreq->sksg_dma_address);
 
4255                 skreq->sksg_list = NULL;
 
4256                 skreq->sksg_dma_address = 0;
 
4261         kfree(skdev->skreq_table);
 
4262         skdev->skreq_table = NULL;
 
4265 static void skd_free_skspcl(struct skd_device *skdev)
 
4270         if (skdev->skspcl_table == NULL)
 
4273         for (i = 0; i < skdev->n_special; i++) {
 
4274                 struct skd_special_context *skspcl;
 
4276                 skspcl = &skdev->skspcl_table[i];
 
4278                 if (skspcl->msg_buf != NULL) {
 
4279                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
 
4280                         pci_free_consistent(skdev->pdev, nbytes,
 
4282                                             skspcl->mb_dma_address);
 
4285                 skspcl->msg_buf = NULL;
 
4286                 skspcl->mb_dma_address = 0;
 
4288                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
 
4289                                  SKD_N_SG_PER_SPECIAL,
 
4290                                  skspcl->req.sksg_dma_address);
 
4292                 skspcl->req.sksg_list = NULL;
 
4293                 skspcl->req.sksg_dma_address = 0;
 
4295                 kfree(skspcl->req.sg);
 
4298         kfree(skdev->skspcl_table);
 
4299         skdev->skspcl_table = NULL;
 
4302 static void skd_free_sksb(struct skd_device *skdev)
 
4304         struct skd_special_context *skspcl;
 
4307         skspcl = &skdev->internal_skspcl;
 
4309         if (skspcl->data_buf != NULL) {
 
4310                 nbytes = SKD_N_INTERNAL_BYTES;
 
4312                 pci_free_consistent(skdev->pdev, nbytes,
 
4313                                     skspcl->data_buf, skspcl->db_dma_address);
 
4316         skspcl->data_buf = NULL;
 
4317         skspcl->db_dma_address = 0;
 
4319         if (skspcl->msg_buf != NULL) {
 
4320                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
 
4321                 pci_free_consistent(skdev->pdev, nbytes,
 
4322                                     skspcl->msg_buf, skspcl->mb_dma_address);
 
4325         skspcl->msg_buf = NULL;
 
4326         skspcl->mb_dma_address = 0;
 
4328         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
 
4329                          skspcl->req.sksg_dma_address);
 
4331         skspcl->req.sksg_list = NULL;
 
4332         skspcl->req.sksg_dma_address = 0;
 
4335 static void skd_free_disk(struct skd_device *skdev)
 
4337         struct gendisk *disk = skdev->disk;
 
4339         if (disk && (disk->flags & GENHD_FL_UP))
 
4343                 blk_cleanup_queue(skdev->queue);
 
4344                 skdev->queue = NULL;
 
4352 static void skd_destruct(struct skd_device *skdev)
 
4357         dev_dbg(&skdev->pdev->dev, "disk\n");
 
4358         skd_free_disk(skdev);
 
4360         dev_dbg(&skdev->pdev->dev, "sksb\n");
 
4361         skd_free_sksb(skdev);
 
4363         dev_dbg(&skdev->pdev->dev, "skspcl\n");
 
4364         skd_free_skspcl(skdev);
 
4366         dev_dbg(&skdev->pdev->dev, "skreq\n");
 
4367         skd_free_skreq(skdev);
 
4369         dev_dbg(&skdev->pdev->dev, "skmsg\n");
 
4370         skd_free_skmsg(skdev);
 
4372         dev_dbg(&skdev->pdev->dev, "skcomp\n");
 
4373         skd_free_skcomp(skdev);
 
4375         dev_dbg(&skdev->pdev->dev, "skdev\n");
 
4380  *****************************************************************************
 
4381  * BLOCK DEVICE (BDEV) GLUE
 
4382  *****************************************************************************
 
4385 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 
4387         struct skd_device *skdev;
 
4390         skdev = bdev->bd_disk->private_data;
 
4392         dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
 
4393                 bdev->bd_disk->disk_name, current->comm);
 
4395         if (skdev->read_cap_is_valid) {
 
4396                 capacity = get_capacity(skdev->disk);
 
4399                 geo->cylinders = (capacity) / (255 * 64);
 
4406 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
 
4408         dev_dbg(&skdev->pdev->dev, "add_disk\n");
 
4409         device_add_disk(parent, skdev->disk);
 
4413 static const struct block_device_operations skd_blockdev_ops = {
 
4414         .owner          = THIS_MODULE,
 
4415         .ioctl          = skd_bdev_ioctl,
 
4416         .getgeo         = skd_bdev_getgeo,
 
4420  *****************************************************************************
 
4422  *****************************************************************************
 
4425 static const struct pci_device_id skd_pci_tbl[] = {
 
4426         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
 
4427           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 
4428         { 0 }                     /* terminate list */
 
4431 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
 
4433 static char *skd_pci_info(struct skd_device *skdev, char *str)
 
4437         strcpy(str, "PCIe (");
 
4438         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
 
4443                 uint16_t pcie_lstat, lspeed, lwidth;
 
4446                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
 
4447                 lspeed = pcie_lstat & (0xF);
 
4448                 lwidth = (pcie_lstat & 0x3F0) >> 4;
 
4451                         strcat(str, "2.5GT/s ");
 
4452                 else if (lspeed == 2)
 
4453                         strcat(str, "5.0GT/s ");
 
4455                         strcat(str, "<unknown> ");
 
4456                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
 
4462 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
4467         struct skd_device *skdev;
 
4469         dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
 
4470                  DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
 
4471         dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
 
4474         rc = pci_enable_device(pdev);
 
4477         rc = pci_request_regions(pdev, DRV_NAME);
 
4480         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 
4482                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 
4483                         dev_err(&pdev->dev, "consistent DMA mask error %d\n",
 
4487                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
4489                         dev_err(&pdev->dev, "DMA mask error %d\n", rc);
 
4490                         goto err_out_regions;
 
4495                 rc = register_blkdev(0, DRV_NAME);
 
4497                         goto err_out_regions;
 
4502         skdev = skd_construct(pdev);
 
4503         if (skdev == NULL) {
 
4505                 goto err_out_regions;
 
4508         skd_pci_info(skdev, pci_str);
 
4509         dev_info(&pdev->dev, "%s 64bit\n", pci_str);
 
4511         pci_set_master(pdev);
 
4512         rc = pci_enable_pcie_error_reporting(pdev);
 
4515                         "bad enable of PCIe error reporting rc=%d\n", rc);
 
4516                 skdev->pcie_error_reporting_is_enabled = 0;
 
4518                 skdev->pcie_error_reporting_is_enabled = 1;
 
4520         pci_set_drvdata(pdev, skdev);
 
4522         for (i = 0; i < SKD_MAX_BARS; i++) {
 
4523                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
 
4524                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
 
4525                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
 
4526                                             skdev->mem_size[i]);
 
4527                 if (!skdev->mem_map[i]) {
 
4529                                 "Unable to map adapter memory!\n");
 
4531                         goto err_out_iounmap;
 
4533                 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
 
4534                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
 
4535                         skdev->mem_size[i]);
 
4538         rc = skd_acquire_irq(skdev);
 
4540                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
 
4541                 goto err_out_iounmap;
 
4544         rc = skd_start_timer(skdev);
 
4548         init_waitqueue_head(&skdev->waitq);
 
4550         skd_start_device(skdev);
 
4552         rc = wait_event_interruptible_timeout(skdev->waitq,
 
4553                                               (skdev->gendisk_on),
 
4554                                               (SKD_START_WAIT_SECONDS * HZ));
 
4555         if (skdev->gendisk_on > 0) {
 
4556                 /* device came on-line after reset */
 
4557                 skd_bdev_attach(&pdev->dev, skdev);
 
4560                 /* we timed out, something is wrong with the device,
 
4561                    don't add the disk structure */
 
4562                 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
 
4564                 /* in case of no error; we timeout with ENXIO */
 
4573         skd_stop_device(skdev);
 
4574         skd_release_irq(skdev);
 
4577         for (i = 0; i < SKD_MAX_BARS; i++)
 
4578                 if (skdev->mem_map[i])
 
4579                         iounmap(skdev->mem_map[i]);
 
4581         if (skdev->pcie_error_reporting_is_enabled)
 
4582                 pci_disable_pcie_error_reporting(pdev);
 
4584         skd_destruct(skdev);
 
4587         pci_release_regions(pdev);
 
4590         pci_disable_device(pdev);
 
4591         pci_set_drvdata(pdev, NULL);
 
4595 static void skd_pci_remove(struct pci_dev *pdev)
 
4598         struct skd_device *skdev;
 
4600         skdev = pci_get_drvdata(pdev);
 
4602                 dev_err(&pdev->dev, "no device data for PCI\n");
 
4605         skd_stop_device(skdev);
 
4606         skd_release_irq(skdev);
 
4608         for (i = 0; i < SKD_MAX_BARS; i++)
 
4609                 if (skdev->mem_map[i])
 
4610                         iounmap(skdev->mem_map[i]);
 
4612         if (skdev->pcie_error_reporting_is_enabled)
 
4613                 pci_disable_pcie_error_reporting(pdev);
 
4615         skd_destruct(skdev);
 
4617         pci_release_regions(pdev);
 
4618         pci_disable_device(pdev);
 
4619         pci_set_drvdata(pdev, NULL);
 
4624 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 
4627         struct skd_device *skdev;
 
4629         skdev = pci_get_drvdata(pdev);
 
4631                 dev_err(&pdev->dev, "no device data for PCI\n");
 
4635         skd_stop_device(skdev);
 
4637         skd_release_irq(skdev);
 
4639         for (i = 0; i < SKD_MAX_BARS; i++)
 
4640                 if (skdev->mem_map[i])
 
4641                         iounmap(skdev->mem_map[i]);
 
4643         if (skdev->pcie_error_reporting_is_enabled)
 
4644                 pci_disable_pcie_error_reporting(pdev);
 
4646         pci_release_regions(pdev);
 
4647         pci_save_state(pdev);
 
4648         pci_disable_device(pdev);
 
4649         pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
4653 static int skd_pci_resume(struct pci_dev *pdev)
 
4657         struct skd_device *skdev;
 
4659         skdev = pci_get_drvdata(pdev);
 
4661                 dev_err(&pdev->dev, "no device data for PCI\n");
 
4665         pci_set_power_state(pdev, PCI_D0);
 
4666         pci_enable_wake(pdev, PCI_D0, 0);
 
4667         pci_restore_state(pdev);
 
4669         rc = pci_enable_device(pdev);
 
4672         rc = pci_request_regions(pdev, DRV_NAME);
 
4675         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 
4677                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 
4679                         dev_err(&pdev->dev, "consistent DMA mask error %d\n",
 
4683                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
4686                         dev_err(&pdev->dev, "DMA mask error %d\n", rc);
 
4687                         goto err_out_regions;
 
4691         pci_set_master(pdev);
 
4692         rc = pci_enable_pcie_error_reporting(pdev);
 
4695                         "bad enable of PCIe error reporting rc=%d\n", rc);
 
4696                 skdev->pcie_error_reporting_is_enabled = 0;
 
4698                 skdev->pcie_error_reporting_is_enabled = 1;
 
4700         for (i = 0; i < SKD_MAX_BARS; i++) {
 
4702                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
 
4703                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
 
4704                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
 
4705                                             skdev->mem_size[i]);
 
4706                 if (!skdev->mem_map[i]) {
 
4707                         dev_err(&pdev->dev, "Unable to map adapter memory!\n");
 
4709                         goto err_out_iounmap;
 
4711                 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
 
4712                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
 
4713                         skdev->mem_size[i]);
 
4715         rc = skd_acquire_irq(skdev);
 
4717                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
 
4718                 goto err_out_iounmap;
 
4721         rc = skd_start_timer(skdev);
 
4725         init_waitqueue_head(&skdev->waitq);
 
4727         skd_start_device(skdev);
 
4732         skd_stop_device(skdev);
 
4733         skd_release_irq(skdev);
 
4736         for (i = 0; i < SKD_MAX_BARS; i++)
 
4737                 if (skdev->mem_map[i])
 
4738                         iounmap(skdev->mem_map[i]);
 
4740         if (skdev->pcie_error_reporting_is_enabled)
 
4741                 pci_disable_pcie_error_reporting(pdev);
 
4744         pci_release_regions(pdev);
 
4747         pci_disable_device(pdev);
 
4751 static void skd_pci_shutdown(struct pci_dev *pdev)
 
4753         struct skd_device *skdev;
 
4755         dev_err(&pdev->dev, "%s called\n", __func__);
 
4757         skdev = pci_get_drvdata(pdev);
 
4759                 dev_err(&pdev->dev, "no device data for PCI\n");
 
4763         dev_err(&pdev->dev, "calling stop\n");
 
4764         skd_stop_device(skdev);
 
4767 static struct pci_driver skd_driver = {
 
4769         .id_table       = skd_pci_tbl,
 
4770         .probe          = skd_pci_probe,
 
4771         .remove         = skd_pci_remove,
 
4772         .suspend        = skd_pci_suspend,
 
4773         .resume         = skd_pci_resume,
 
4774         .shutdown       = skd_pci_shutdown,
 
4778  *****************************************************************************
 
4780  *****************************************************************************
 
4783 const char *skd_drive_state_to_str(int state)
 
4786         case FIT_SR_DRIVE_OFFLINE:
 
4788         case FIT_SR_DRIVE_INIT:
 
4790         case FIT_SR_DRIVE_ONLINE:
 
4792         case FIT_SR_DRIVE_BUSY:
 
4794         case FIT_SR_DRIVE_FAULT:
 
4796         case FIT_SR_DRIVE_DEGRADED:
 
4798         case FIT_SR_PCIE_LINK_DOWN:
 
4800         case FIT_SR_DRIVE_SOFT_RESET:
 
4801                 return "SOFT_RESET";
 
4802         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
 
4804         case FIT_SR_DRIVE_INIT_FAULT:
 
4805                 return "INIT_FAULT";
 
4806         case FIT_SR_DRIVE_BUSY_SANITIZE:
 
4807                 return "BUSY_SANITIZE";
 
4808         case FIT_SR_DRIVE_BUSY_ERASE:
 
4809                 return "BUSY_ERASE";
 
4810         case FIT_SR_DRIVE_FW_BOOTING:
 
4811                 return "FW_BOOTING";
 
4817 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
 
4820         case SKD_DRVR_STATE_LOAD:
 
4822         case SKD_DRVR_STATE_IDLE:
 
4824         case SKD_DRVR_STATE_BUSY:
 
4826         case SKD_DRVR_STATE_STARTING:
 
4828         case SKD_DRVR_STATE_ONLINE:
 
4830         case SKD_DRVR_STATE_PAUSING:
 
4832         case SKD_DRVR_STATE_PAUSED:
 
4834         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 
4835                 return "DRAINING_TIMEOUT";
 
4836         case SKD_DRVR_STATE_RESTARTING:
 
4837                 return "RESTARTING";
 
4838         case SKD_DRVR_STATE_RESUMING:
 
4840         case SKD_DRVR_STATE_STOPPING:
 
4842         case SKD_DRVR_STATE_SYNCING:
 
4844         case SKD_DRVR_STATE_FAULT:
 
4846         case SKD_DRVR_STATE_DISAPPEARED:
 
4847                 return "DISAPPEARED";
 
4848         case SKD_DRVR_STATE_BUSY_ERASE:
 
4849                 return "BUSY_ERASE";
 
4850         case SKD_DRVR_STATE_BUSY_SANITIZE:
 
4851                 return "BUSY_SANITIZE";
 
4852         case SKD_DRVR_STATE_BUSY_IMMINENT:
 
4853                 return "BUSY_IMMINENT";
 
4854         case SKD_DRVR_STATE_WAIT_BOOT:
 
4862 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 
4865         case SKD_MSG_STATE_IDLE:
 
4867         case SKD_MSG_STATE_BUSY:
 
4874 static const char *skd_skreq_state_to_str(enum skd_req_state state)
 
4877         case SKD_REQ_STATE_IDLE:
 
4879         case SKD_REQ_STATE_SETUP:
 
4881         case SKD_REQ_STATE_BUSY:
 
4883         case SKD_REQ_STATE_COMPLETED:
 
4885         case SKD_REQ_STATE_TIMEOUT:
 
4887         case SKD_REQ_STATE_ABORTED:
 
4894 static void skd_log_skdev(struct skd_device *skdev, const char *event)
 
4896         dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
 
4897         dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
 
4898                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
 
4899                 skd_skdev_state_to_str(skdev->state), skdev->state);
 
4900         dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
 
4901                 skdev->in_flight, skdev->cur_max_queue_depth,
 
4902                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
 
4903         dev_dbg(&skdev->pdev->dev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
 
4904                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
 
4907 static void skd_log_skmsg(struct skd_device *skdev,
 
4908                           struct skd_fitmsg_context *skmsg, const char *event)
 
4910         dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
 
4911         dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x length=%d\n",
 
4912                 skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
 
4916 static void skd_log_skreq(struct skd_device *skdev,
 
4917                           struct skd_request_context *skreq, const char *event)
 
4919         dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
 
4920         dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
 
4921                 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
 
4923         dev_dbg(&skdev->pdev->dev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
 
4924                 skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);
 
4926         if (skreq->req != NULL) {
 
4927                 struct request *req = skreq->req;
 
4928                 u32 lba = (u32)blk_rq_pos(req);
 
4929                 u32 count = blk_rq_sectors(req);
 
4931                 dev_dbg(&skdev->pdev->dev,
 
4932                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
 
4933                         lba, lba, count, count, (int)rq_data_dir(req));
 
4935                 dev_dbg(&skdev->pdev->dev, "req=NULL\n");
 
4939  *****************************************************************************
 
4941  *****************************************************************************
 
4944 static int __init skd_init(void)
 
4946         BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
 
4947         BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
 
4948         BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
 
4949         BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
 
4950         BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
 
4951         BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
 
4952         BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
 
4953         BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
 
4955         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
 
4957         switch (skd_isr_type) {
 
4958         case SKD_IRQ_LEGACY:
 
4963                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
 
4964                        skd_isr_type, SKD_IRQ_DEFAULT);
 
4965                 skd_isr_type = SKD_IRQ_DEFAULT;
 
4968         if (skd_max_queue_depth < 1 ||
 
4969             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
 
4970                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
 
4971                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
 
4972                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
 
4975         if (skd_max_req_per_msg < 1 ||
 
4976             skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
 
4977                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
 
4978                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
 
4979                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
 
4982         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
 
4983                 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
 
4984                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
 
4985                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
 
4988         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
 
4989                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
 
4994         if (skd_isr_comp_limit < 0) {
 
4995                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
 
4996                        skd_isr_comp_limit, 0);
 
4997                 skd_isr_comp_limit = 0;
 
5000         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
 
5001                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
 
5002                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
 
5003                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
 
5006         return pci_register_driver(&skd_driver);
 
5009 static void __exit skd_exit(void)
 
5011         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
 
5013         pci_unregister_driver(&skd_driver);
 
5016                 unregister_blkdev(skd_major, DRV_NAME);
 
5019 module_init(skd_init);
 
5020 module_exit(skd_exit);