Semaphore to mutex conversion.
The conversion was generated by a script, and the result was
validated automatically by a script as well.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 #include <linux/uio.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
 
 static struct class *raw_class;
 static struct raw_device_data raw_devices[MAX_RAW_MINORS];
-static DECLARE_MUTEX(raw_mutex);
+static DEFINE_MUTEX(raw_mutex);
 static struct file_operations raw_ctl_fops;         /* forward declaration */
 
 /*
                return 0;
        }
 
-       down(&raw_mutex);
+       mutex_lock(&raw_mutex);
 
        /*
         * All we need to do on open is check that the device is bound.
                filp->f_dentry->d_inode->i_mapping =
                        bdev->bd_inode->i_mapping;
        filp->private_data = bdev;
-       up(&raw_mutex);
+       mutex_unlock(&raw_mutex);
        return 0;
 
 out2:
 out1:
        blkdev_put(bdev);
 out:
-       up(&raw_mutex);
+       mutex_unlock(&raw_mutex);
        return err;
 }
 
        const int minor= iminor(inode);
        struct block_device *bdev;
 
-       down(&raw_mutex);
+       mutex_lock(&raw_mutex);
        bdev = raw_devices[minor].binding;
        if (--raw_devices[minor].inuse == 0) {
                /* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
                inode->i_mapping = &inode->i_data;
                inode->i_mapping->backing_dev_info = &default_backing_dev_info;
        }
-       up(&raw_mutex);
+       mutex_unlock(&raw_mutex);
 
        bd_release(bdev);
        blkdev_put(bdev);
                                goto out;
                        }
 
-                       down(&raw_mutex);
+                       mutex_lock(&raw_mutex);
                        if (rawdev->inuse) {
-                               up(&raw_mutex);
+                               mutex_unlock(&raw_mutex);
                                err = -EBUSY;
                                goto out;
                        }
                                        bind_device(&rq);
                                }
                        }
-                       up(&raw_mutex);
+                       mutex_unlock(&raw_mutex);
                } else {
                        struct block_device *bdev;
 
-                       down(&raw_mutex);
+                       mutex_lock(&raw_mutex);
                        bdev = rawdev->binding;
                        if (bdev) {
                                rq.block_major = MAJOR(bdev->bd_dev);
                        } else {
                                rq.block_major = rq.block_minor = 0;
                        }
-                       up(&raw_mutex);
+                       mutex_unlock(&raw_mutex);
                        if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
                                err = -EFAULT;
                                goto out;
 
 #include <linux/netlink.h>
 #include <linux/moduleparam.h>
 #include <linux/connector.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 
 MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
 MODULE_PARM_DESC(cn_val, "Connector's main device val.");
 
-static DECLARE_MUTEX(notify_lock);
+static DEFINE_MUTEX(notify_lock);
 static LIST_HEAD(notify_list);
 
 static struct cn_dev cdev;
 {
        struct cn_ctl_entry *ent;
 
-       down(&notify_lock);
+       mutex_lock(&notify_lock);
        list_for_each_entry(ent, &notify_list, notify_entry) {
                int i;
                struct cn_notify_req *req;
                        cn_netlink_send(&m, ctl->group, GFP_KERNEL);
                }
        }
-       up(&notify_lock);
+       mutex_unlock(&notify_lock);
 }
 
 /*
        if (ctl->group == 0) {
                struct cn_ctl_entry *n;
 
-               down(&notify_lock);
+               mutex_lock(&notify_lock);
                list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
                        if (cn_ctl_msg_equals(ent->msg, ctl)) {
                                list_del(&ent->notify_entry);
                                kfree(ent);
                        }
                }
-               up(&notify_lock);
+               mutex_unlock(&notify_lock);
 
                return;
        }
 
        memcpy(ent->msg, ctl, size - sizeof(*ent));
 
-       down(&notify_lock);
+       mutex_lock(&notify_lock);
        list_add(&ent->notify_entry, &notify_list);
-       up(&notify_lock);
+       mutex_unlock(&notify_lock);
 }
 
 static int __init cn_init(void)
 
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/semaphore.h>
 
 static dma_addr_t smi_data_buf_handle;
 static unsigned long smi_data_buf_size;
 static u32 smi_data_buf_phys_addr;
-static DECLARE_MUTEX(smi_data_lock);
+static DEFINE_MUTEX(smi_data_lock);
 
 static unsigned int host_control_action;
 static unsigned int host_control_smi_type;
        buf_size = simple_strtoul(buf, NULL, 10);
 
        /* make sure SMI data buffer is at least buf_size */
-       down(&smi_data_lock);
+       mutex_lock(&smi_data_lock);
        ret = smi_data_buf_realloc(buf_size);
-       up(&smi_data_lock);
+       mutex_unlock(&smi_data_lock);
        if (ret)
                return ret;
 
        size_t max_read;
        ssize_t ret;
 
-       down(&smi_data_lock);
+       mutex_lock(&smi_data_lock);
 
        if (pos >= smi_data_buf_size) {
                ret = 0;
        ret = min(max_read, count);
        memcpy(buf, smi_data_buf + pos, ret);
 out:
-       up(&smi_data_lock);
+       mutex_unlock(&smi_data_lock);
        return ret;
 }
 
 {
        ssize_t ret;
 
-       down(&smi_data_lock);
+       mutex_lock(&smi_data_lock);
 
        ret = smi_data_buf_realloc(pos + count);
        if (ret)
        memcpy(smi_data_buf + pos, buf, count);
        ret = count;
 out:
-       up(&smi_data_lock);
+       mutex_unlock(&smi_data_lock);
        return ret;
 }
 
        ssize_t ret;
 
        /* make sure buffer is available for host control command */
-       down(&smi_data_lock);
+       mutex_lock(&smi_data_lock);
        ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
-       up(&smi_data_lock);
+       mutex_unlock(&smi_data_lock);
        if (ret)
                return ret;
 
        unsigned long val = simple_strtoul(buf, NULL, 10);
        ssize_t ret;
 
-       down(&smi_data_lock);
+       mutex_lock(&smi_data_lock);
 
        if (smi_data_buf_size < sizeof(struct smi_cmd)) {
                ret = -ENODEV;
        }
 
 out:
-       up(&smi_data_lock);
+       mutex_unlock(&smi_data_lock);
        return ret;
 }
 
 
 #include <linux/rwsem.h>
 #include <linux/stddef.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <net/slhc_vj.h>
 #include <asm/atomic.h>
 
 static void cardmap_destroy(struct cardmap **map);
 
 /*
- * all_ppp_sem protects the all_ppp_units mapping.
+ * all_ppp_mutex protects the all_ppp_units mapping.
  * It also ensures that finding a ppp unit in the all_ppp_units map
  * and updating its file.refcnt field is atomic.
  */
-static DECLARE_MUTEX(all_ppp_sem);
+static DEFINE_MUTEX(all_ppp_mutex);
 static struct cardmap *all_ppp_units;
 static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 
                /* Attach to an existing ppp unit */
                if (get_user(unit, p))
                        break;
-               down(&all_ppp_sem);
+               mutex_lock(&all_ppp_mutex);
                err = -ENXIO;
                ppp = ppp_find_unit(unit);
                if (ppp != 0) {
                        file->private_data = &ppp->file;
                        err = 0;
                }
-               up(&all_ppp_sem);
+               mutex_unlock(&all_ppp_mutex);
                break;
 
        case PPPIOCATTCHAN:
        dev->do_ioctl = ppp_net_ioctl;
 
        ret = -EEXIST;
-       down(&all_ppp_sem);
+       mutex_lock(&all_ppp_mutex);
        if (unit < 0)
                unit = cardmap_find_first_free(all_ppp_units);
        else if (cardmap_get(all_ppp_units, unit) != NULL)
 
        atomic_inc(&ppp_unit_count);
        cardmap_set(&all_ppp_units, unit, ppp);
-       up(&all_ppp_sem);
+       mutex_unlock(&all_ppp_mutex);
        *retp = 0;
        return ppp;
 
 out2:
-       up(&all_ppp_sem);
+       mutex_unlock(&all_ppp_mutex);
        free_netdev(dev);
 out1:
        kfree(ppp);
 {
        struct net_device *dev;
 
-       down(&all_ppp_sem);
+       mutex_lock(&all_ppp_mutex);
        ppp_lock(ppp);
        dev = ppp->dev;
        ppp->dev = NULL;
        ppp->file.dead = 1;
        ppp->owner = NULL;
        wake_up_interruptible(&ppp->file.rwait);
-       up(&all_ppp_sem);
+       mutex_unlock(&all_ppp_mutex);
 }
 
 /*
 
 /*
  * Locate an existing ppp unit.
- * The caller should have locked the all_ppp_sem.
+ * The caller should have locked the all_ppp_mutex.
  */
 static struct ppp *
 ppp_find_unit(int unit)
        int ret = -ENXIO;
        int hdrlen;
 
-       down(&all_ppp_sem);
+       mutex_lock(&all_ppp_mutex);
        ppp = ppp_find_unit(unit);
        if (ppp == 0)
                goto out;
  outl:
        write_unlock_bh(&pch->upl);
  out:
-       up(&all_ppp_sem);
+       mutex_unlock(&all_ppp_mutex);
        return ret;
 }