ret = PTR_ERR(mapping);
                        break;
                }
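+               /* mapping obtained: mark the BO as active on the GPU until unpin */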
+               atomic_inc(&etnaviv_obj->gpu_active);
 
                submit->bos[i].flags |= BO_PINNED;
                submit->bos[i].mapping = mapping;
                /* unpin all objects */
                if (submit->bos[i].flags & BO_PINNED) {
                        etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
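+                       /* drop the GPU-active count taken at pin time */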
+                       atomic_dec(&etnaviv_obj->gpu_active);
                        submit->bos[i].mapping = NULL;
                        submit->bos[i].flags &= ~BO_PINNED;
                }
                drm_gem_object_put_unlocked(&etnaviv_obj->base);
        }
 
+       /* all BOs are unpinned and gpu_active dropped; wake the BO-idle waiters */
+       wake_up_all(&submit->gpu->fence_event);
+
        if (submit->in_fence)
                dma_fence_put(submit->in_fence);
        if (submit->out_fence)
 
                                               retire_work);
        u32 fence = gpu->completed_fence;
        struct etnaviv_gem_submit *submit, *tmp;
-       unsigned int i;
 
        mutex_lock(&gpu->lock);
        list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
 
                list_del(&submit->node);
 
-               for (i = 0; i < submit->nr_bos; i++)
-                       atomic_dec(&submit->bos[i].obj->gpu_active);
-
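+               /* the gpu_active decrement now happens in submit cleanup */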
                etnaviv_submit_put(submit);
                /*
                 * We need to balance the runtime PM count caused by
        gpu->retired_fence = fence;
 
        mutex_unlock(&gpu->lock);
-
-       wake_up_all(&gpu->fence_event);
 }
 
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
        /* We're committed to adding this command buffer, hold a PM reference */
        pm_runtime_get_noresume(gpu->dev);
 
-       for (i = 0; i < submit->nr_bos; i++) {
-               struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-               atomic_inc(&etnaviv_obj->gpu_active);
-       }
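+       /* BOs are marked GPU-active at pin time now, not at queue time */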
        hangcheck_timer_reset(gpu);
        ret = 0;
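
Note: the wake_up_all() moved into submit teardown above pairs with the
BO-idle wait on gpu->fence_event (cf. etnaviv_gpu_wait_obj_inactive() in
etnaviv_gpu.c). A minimal sketch of that wait-side shape is below; the
bo_is_idle() and wait_bo_idle() names and the simplified timeout handling
are illustrative, not copied from the tree:

/*
 * Illustrative sketch only: bo_is_idle() and wait_bo_idle() are made-up
 * names; see etnaviv_gpu_wait_obj_inactive() for the real waiter.
 */
static bool bo_is_idle(struct etnaviv_gem_object *etnaviv_obj)
{
        return atomic_read(&etnaviv_obj->gpu_active) == 0;
}

int wait_bo_idle(struct etnaviv_gpu *gpu,
                 struct etnaviv_gem_object *etnaviv_obj,
                 unsigned long timeout_jiffies)
{
        long ret;

        /*
         * Sleeps until submit cleanup drops gpu_active and calls
         * wake_up_all(&gpu->fence_event), which is why the wakeup
         * must follow the atomic_dec(), as it does in the hunks above.
         */
        ret = wait_event_interruptible_timeout(gpu->fence_event,
                                               bo_is_idle(etnaviv_obj),
                                               timeout_jiffies);
        if (ret > 0)
                return 0;
        if (ret == 0)
                return -ETIMEDOUT;

        return ret; /* -ERESTARTSYS */
}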