staging: kpc2000: kpc_dma: rearrange lines exceeding 100 columns
author Deepak R Varma <mh12gx2825@gmail.com>
Mon, 26 Oct 2020 17:56:09 +0000 (23:26 +0530)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 Oct 2020 11:23:53 +0000 (12:23 +0100)
Reformat lines that exceed 100 columns in length, as reported by the
checkpatch script.
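
For reference, a minimal sketch of how these warnings can be reproduced,
assuming a kernel source tree (recent checkpatch versions default to a
100-column limit, which --max-line-length can override):

  ./scripts/checkpatch.pl -f drivers/staging/kpc2000/kpc_dma/dma.c
  ./scripts/checkpatch.pl --max-line-length=100 -f \
	drivers/staging/kpc2000/kpc_dma/fileops.c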

Signed-off-by: Deepak R Varma <mh12gx2825@gmail.com>
Link: https://lore.kernel.org/r/c853e015ec460b909a3e2cd529bc0f69093bce3e.1603734679.git.mh12gx2825@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/kpc2000/kpc_dma/dma.c
drivers/staging/kpc2000/kpc_dma/fileops.c
drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
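
All three files receive the same mechanical transformation, sketched here
as a minimal, self-contained C example (hypothetical names, not taken from
this driver): an over-long call is broken after an argument, and the
continuation lines are aligned just past the opening parenthesis, per the
kernel coding style.

	#include <stdio.h>

	/* Hypothetical helper standing in for a call with many arguments. */
	static int do_transfer(const char *name, int bytes, unsigned int flags)
	{
		return printf("%s: %d bytes, flags %#x\n", name, bytes, flags);
	}

	int main(void)
	{
		/* Before: the whole call sat on one line past 100 columns. */
		int rv = do_transfer("sample-transfer-with-a-long-name",
				     4096, 0x1u);

		return rv < 0;
	}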

diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
index 452a3f7c835dc4cf5f4799940cf81224e620e65a..e169ac609ba49af4efb0facf29fbec4b5aae4971 100644
--- a/drivers/staging/kpc2000/kpc_dma/dma.c
+++ b/drivers/staging/kpc2000/kpc_dma/dma.c
@@ -16,7 +16,8 @@ irqreturn_t  ndd_irq_handler(int irq, void *dev_id)
 {
        struct kpc_dma_device *ldev = (struct kpc_dma_device *)dev_id;
 
-       if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) || (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
+       if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) ||
+           (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
                schedule_work(&ldev->irq_work);
 
        return IRQ_HANDLED;
@@ -39,7 +40,8 @@ void  ndd_irq_worker(struct work_struct *ws)
        cur = eng->desc_completed;
        do {
                cur = cur->Next;
-               dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd);
+               dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n",
+                       cur, cur->acd);
                BUG_ON(cur == eng->desc_next); // Ordering failure.
 
                if (cur->DescControlFlags & DMA_DESC_CTL_SOP) {
@@ -56,7 +58,8 @@ void  ndd_irq_worker(struct work_struct *ws)
 
                if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
                        if (cur->acd)
-                               transfer_complete_cb(cur->acd, eng->accumulated_bytes, eng->accumulated_flags | ACD_FLAG_DONE);
+                               transfer_complete_cb(cur->acd, eng->accumulated_bytes,
+                                                    eng->accumulated_flags | ACD_FLAG_DONE);
                }
 
                eng->desc_completed = cur;
@@ -103,7 +106,9 @@ int  setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
                eng->dir = DMA_TO_DEVICE;
 
        eng->desc_pool_cnt = desc_cnt;
-       eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096);
+       eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev,
+                                        sizeof(struct kpc_dma_descriptor),
+                                        DMA_DESC_ALIGNMENT, 4096);
 
        eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
        if (!eng->desc_pool_first) {
@@ -141,7 +146,8 @@ int  setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
        INIT_WORK(&eng->irq_work, ndd_irq_worker);
 
        // Grab IRQ line
-       rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng);
+       rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED,
+                        KP_DRIVER_NAME_DMA_CONTROLLER, eng);
        if (rv) {
                dev_err(&eng->pldev->dev, "%s: failed to request_irq: %d\n", __func__, rv);
                return rv;
@@ -195,7 +201,10 @@ void  stop_dma_engine(struct kpc_dma_device *eng)
        }
 
        // Clear any persistent bits just to make sure there is no residue from the reset
-       SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE | ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR | ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END | ENG_CTL_DMA_WAITING_PERSIST), 0);
+       SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE |
+                                   ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR |
+                                   ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END |
+                                   ENG_CTL_DMA_WAITING_PERSIST), 0);
 
        // Reset performance counters
 
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index e1c7c04f16fe4b3102b5669e0d9dcb06833666eb..10dcd6646b0107fd5cf238d8c8ec67c738a6ce09 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -76,7 +76,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
 
        // Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
        mmap_read_lock(current->mm);      /*  get memory map semaphore */
-       rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE, acd->user_pages, NULL);
+       rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE,
+                           acd->user_pages, NULL);
        mmap_read_unlock(current->mm);        /*  release the semaphore */
        if (rv != acd->page_count) {
                nr_pages = rv;
@@ -89,16 +90,19 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
        nr_pages = acd->page_count;
 
        // Allocate and setup the sg_table (scatterlist entries)
-       rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
+       rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count,
+                                      iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
        if (rv) {
                dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%d)\n", rv);
                goto unpin_pages;
        }
 
        // Setup the DMA mapping for all the sg entries
-       acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+       acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents,
+                                            ldev->dir);
        if (acd->mapped_entry_count <= 0) {
-               dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
+               dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n",
+                       acd->mapped_entry_count);
                goto free_table;
        }
 
@@ -111,14 +115,21 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
 
        // Figure out how many descriptors are available and return an error if there aren't enough
        num_descrs_avail = count_descriptors_available(ldev);
-       dev_dbg(&priv->ldev->pldev->dev, "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+       dev_dbg(&priv->ldev->pldev->dev,
+               "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d\n",
+               acd->mapped_entry_count, desc_needed, num_descrs_avail);
+
        if (desc_needed >= ldev->desc_pool_cnt) {
-               dev_warn(&priv->ldev->pldev->dev, "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d    TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+               dev_warn(&priv->ldev->pldev->dev,
+                        "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d    TOO MANY to ever complete!\n",
+                        acd->mapped_entry_count, desc_needed, num_descrs_avail);
                rv = -EAGAIN;
                goto err_descr_too_many;
        }
        if (desc_needed > num_descrs_avail) {
-               dev_warn(&priv->ldev->pldev->dev, "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d    Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+               dev_warn(&priv->ldev->pldev->dev,
+                        "    mapped_entry_count = %d    num_descrs_needed = %d    num_descrs_avail = %d    Too many to complete right now.\n",
+                        acd->mapped_entry_count, desc_needed, num_descrs_avail);
                rv = -EMSGSIZE;
                goto err_descr_too_many;
        }
@@ -163,7 +174,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
                        if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
                                desc->acd = acd;
 
-                       dev_dbg(&priv->ldev->pldev->dev, "  Filled descriptor %p (acd = %p)\n", desc, desc->acd);
+                       dev_dbg(&priv->ldev->pldev->dev, "  Filled descriptor %p (acd = %p)\n",
+                               desc, desc->acd);
 
                        ldev->desc_next = desc->Next;
                        desc = desc->Next;
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index 624d47bae4d1a8b78bfd691f7e5b709d2ccce575..e1dac89ca6a25d39e57739cd037863ca4b521724 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -138,7 +138,8 @@ int  kpc_dma_probe(struct platform_device *pldev)
 
        // Setup miscdev struct
        dev = MKDEV(assigned_major_num, pldev->id);
-       ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev, "kpc_dma%d", pldev->id);
+       ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev,
+                                         "kpc_dma%d", pldev->id);
        if (IS_ERR(ldev->kpc_dma_dev)) {
                rv = PTR_ERR(ldev->kpc_dma_dev);
                dev_err(&ldev->pldev->dev, "%s: device_create failed: %d\n", __func__, rv);
@@ -205,9 +206,11 @@ int __init kpc_dma_driver_init(void)
 {
        int err;
 
-       err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma", &kpc_dma_fops);
+       err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS,
+                               "kpc_dma", &kpc_dma_fops);
        if (err < 0) {
-               pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n", KPC_DMA_CHAR_MAJOR, err);
+               pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n",
+                      KPC_DMA_CHAR_MAJOR, err);
                goto fail_chrdev_register;
        }
        assigned_major_num = err;