                return r;
 
        bo_va = amdgpu_vm_bo_find(vm, abo);
-       if (!bo_va) {
+       if (!bo_va)
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
-       } else {
+       else
                ++bo_va->ref_count;
-       }
        amdgpu_bo_unreserve(abo);
        return 0;
 }
 
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
-               dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%ld)\n", r);
+               dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%ld)\n",
+                       r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
 
 out_unlock:
        if (unlikely(r < 0))
-               dev_err(adev->dev, "failed to clear page "
-                       "tables on GEM object close (%ld)\n", r);
+               dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n",
+                       r);
        ttm_eu_backoff_reservation(&ticket, &list);
 }
 
        struct amdgpu_bo *robj;
 
        gobj = drm_gem_object_lookup(filp, handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 {
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;
+
        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
 }
 
        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /*  clamp timeout to avoid unsigned-> signed overflow */
-       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
+       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;
 
        return timeout_jiffies;
        long ret;
 
        gobj = drm_gem_object_lookup(filp, handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                                    true, timeout);
        struct amdgpu_bo *robj;
        int r = -1;
 
-       DRM_DEBUG("%d \n", args->handle);
+       DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
 
        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(dev->dev,
-                       "va_address 0x%LX is in reserved area 0x%LX\n",
+                       "va_address 0x%llx is in reserved area 0x%llx\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }
        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(dev->dev,
-                       "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+                       "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        int r;
 
        gobj = drm_gem_object_lookup(filp, args->handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
 
        r = amdgpu_bo_reserve(robj, false);
        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
-       if (r) {
+       if (r)
                return r;
-       }
+
        args->handle = handle;
        return 0;
 }