#define QEMU_MBOX_IEN 0x8
#define QEMU_MBOX_TXDONE 0x10
+/* Per-channel register offsets, added to QEMU_MBOX_CHAN_ADDR(chan). */
+#define QEMU_MBOX_DATA 0x0
+#define QEMU_MBOX_ACK 0x4
+
#define QEMU_MBOX_CHAN_STRIDE 0x20
#define QEMU_MBOX_CHAN_ADDR(chan) (QEMU_MBOX_CHAN_STRIDE + (chan * QEMU_MBOX_CHAN_STRIDE))
+/*
+ * MSI-X vector budget: either 2 shared vectors (one irq + one txdone for
+ * all channels) or one rx and one txdone vector per channel.
+ * NOTE(review): QEMU_MBOX_CHAN_ADDR() above does not parenthesize 'chan';
+ * harmless for the plain-variable callers in this file, but it should be
+ * hardened to ((chan) * QEMU_MBOX_CHAN_STRIDE) in a follow-up.
+ */
+#define QEMU_MBOX_MIN_IRQ 2
+#define QEMU_MBOX_MAX_IRQ (QEMU_MBOX_MAX_CHAN_CNT * 2)
+
+/*
+ * Cookie handed to the per-vector ISRs: maps an MSI-X vector back to its
+ * owning controller and the channel index it services.
+ */
+struct qemu_mbox_vec {
+ struct qemu_mbox *parent;
+ u8 idx;
+};
+
struct qemu_mbox {
struct device *dev;
struct regmap *map;
struct mbox_controller mbox;
+ /* Per-vector cookies; only populated in the per-channel MSI-X mode. */
+ struct qemu_mbox_vec *vecs;
};
static struct qemu_mbox *__mbox;
return IRQ_HANDLED;
}
+/*
+ * qemu_mbox_setup_simple_irqs() - wire up the 2-vector fallback mode.
+ *
+ * Vector 0 is routed to qemu_mbox_isr and vector 1 to qemu_mbox_txdone_isr,
+ * both shared across all channels (presumably rx and tx-done respectively,
+ * matching the handler names — confirm against the device model).
+ *
+ * Return: 0 on success, negative errno (via dev_err_probe()) on failure.
+ */
+static int qemu_mbox_setup_simple_irqs(struct pci_dev *pdev,
+ struct qemu_mbox *qemu_mbox)
+{
+ struct device *const dev = &pdev->dev;
+ int err;
+
+ err = devm_request_irq(dev, pci_irq_vector(pdev, 0),
+ qemu_mbox_isr, 0,
+ "qemu_mbox_isr", qemu_mbox);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Can't claim irq\n");
+
+ err = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ qemu_mbox_txdone_isr, 0,
+ "qemu_mbox_txdone_isr", qemu_mbox);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Can't claim txdone irq\n");
+
+ return 0;
+}
+
+/*
+ * qemu_mbox_vec_isr() - rx handler for the per-channel vector mode.
+ *
+ * @data is the struct qemu_mbox_vec cookie registered for this vector; it
+ * identifies the owning controller and the channel index.
+ */
+static irqreturn_t qemu_mbox_vec_isr(int virq, void *data)
+{
+ struct qemu_mbox_vec *vec = data;
+ struct qemu_mbox *mbox = vec->parent;
+ struct mbox_chan *chan = &mbox->mbox.chans[vec->idx];
+ u32 msg = 1;
+
+ /*
+ * Ack the channel, then notify the mailbox core. The payload passed up
+ * is a fixed dummy value (1); presumably the client reads the real
+ * message from the DATA register itself — TODO confirm.
+ */
+ regmap_write(mbox->map, QEMU_MBOX_CHAN_ADDR(vec->idx) + QEMU_MBOX_ACK, 1);
+ mbox_chan_received_data(chan, &msg);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * qemu_mbox_txdone_vec_isr() - tx-done handler for the per-channel vector
+ * mode. Reports successful transmission (status 0) for the channel named
+ * by the struct qemu_mbox_vec cookie in @data.
+ */
+static irqreturn_t qemu_mbox_txdone_vec_isr(int virq, void *data)
+{
+ struct qemu_mbox_vec *vec = data;
+ struct qemu_mbox *mbox = vec->parent;
+ struct mbox_chan *chan = &mbox->mbox.chans[vec->idx];
+
+ mbox_chan_txdone(chan, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * qemu_mbox_setup_vec_irqs() - wire up one MSI-X vector per channel
+ * direction.
+ *
+ * Vectors [0, QEMU_MBOX_MAX_CHAN_CNT) deliver rx interrupts and vectors
+ * [QEMU_MBOX_MAX_CHAN_CNT, QEMU_MBOX_MAX_IRQ) deliver tx-done interrupts.
+ * Each vector gets a struct qemu_mbox_vec cookie so the shared handlers
+ * can recover the controller and channel index.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int qemu_mbox_setup_vec_irqs(struct pci_dev *pdev,
+ struct qemu_mbox *qemu_mbox)
+{
+ struct device *const dev = &pdev->dev;
+ struct qemu_mbox_vec *vecs;
+ unsigned int i;
+ int rc;
+
+ vecs = devm_kmalloc_array(dev, QEMU_MBOX_MAX_IRQ,
+ sizeof(vecs[0]), GFP_KERNEL);
+ if (!vecs)
+ return -ENOMEM;
+
+ for (i = 0; i < QEMU_MBOX_MAX_CHAN_CNT; i++) {
+ /*
+ * Fully initialize the cookie before requesting the IRQ: the
+ * vector may fire as soon as devm_request_irq() returns, and
+ * the handler dereferences vec->parent and vec->idx.
+ */
+ vecs[i].parent = qemu_mbox;
+ vecs[i].idx = i;
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ qemu_mbox_vec_isr, 0,
+ "qemu_mbox_vec_isr", &vecs[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (; i < QEMU_MBOX_MAX_IRQ; i++) {
+ /* Same ordering rule as above: cookie first, then the IRQ. */
+ vecs[i].parent = qemu_mbox;
+ vecs[i].idx = i - QEMU_MBOX_MAX_CHAN_CNT;
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ qemu_mbox_txdone_vec_isr, 0,
+ "qemu_mbox_txdone_vec_isr", &vecs[i]);
+ if (rc)
+ return rc;
+ }
+
+ qemu_mbox->vecs = vecs;
+
+ return 0;
+}
+
+/*
+ * qemu_mbox_setup_irqs() - allocate MSI-X vectors and install handlers.
+ *
+ * Asks for up to one rx + one txdone vector per channel and accepts the
+ * 2-vector shared fallback. On success pci_alloc_irq_vectors() returns the
+ * number of vectors actually allocated, which selects the wiring mode.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int qemu_mbox_setup_irqs(struct pci_dev *pdev, struct qemu_mbox *qemu_mbox)
+{
+ struct device *const dev = &pdev->dev;
+ int err;
+
+ err = pci_alloc_irq_vectors(pdev,
+ QEMU_MBOX_MIN_IRQ,
+ QEMU_MBOX_MAX_IRQ,
+ PCI_IRQ_MSIX);
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "Unable to allocate irqs\n");
+
+ switch (err) {
+ case QEMU_MBOX_MIN_IRQ:
+ err = qemu_mbox_setup_simple_irqs(pdev, qemu_mbox);
+ break;
+ case QEMU_MBOX_MAX_IRQ:
+ err = qemu_mbox_setup_vec_irqs(pdev, qemu_mbox);
+ break;
+ default:
+ /*
+ * NOTE(review): pci_alloc_irq_vectors() may legally return any
+ * count in [min, max]; an intermediate count lands here and
+ * fails probe even though allocation succeeded. Consider
+ * requesting exact counts (max, then min) instead. Also, the
+ * allocated vectors are not released on this path — presumably
+ * the device is pcim-managed; confirm.
+ */
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
struct mbox_chan *qemu_mbox_request_channel(struct mbox_client *cl)
{
struct qemu_mbox *mbox = __mbox;
return dev_err_probe(dev, PTR_ERR(qemu_mbox->map),
"Unable to initialize register map\n");
- err = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
- if (err < 0)
- return dev_err_probe(dev, err,
- "Unable to allocate irqs\n");
-
- err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
- qemu_mbox_isr, 0,
- "qemu_mbox_isr", qemu_mbox);
+ err = qemu_mbox_setup_irqs(pdev, qemu_mbox);
if (err)
- return dev_err_probe(dev, err,
- "Can't claim irq\n");
-
- err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 1),
- qemu_mbox_txdone_isr, 0,
- "qemu_mbox_txdone_isr", qemu_mbox);
- if (err)
- return dev_err_probe(dev, err,
- "Can't claim txdone irq\n");
+ return err;
pci_set_master(pdev);