 	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
 			    cmd->num_comps);
-	if (atomic_dec_and_test(&mdp->job_count)) {
+	if (refcount_dec_and_test(&mdp->job_count)) {
 		if (cmd->mdp_ctx)
 			mdp_m2m_job_finish(cmd->mdp_ctx);
 	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
 			    cmd->num_comps);
-	if (atomic_dec_and_test(&mdp->job_count))
+	if (refcount_dec_and_test(&mdp->job_count))
 		wake_up(&mdp->callback_wq);
 	mdp_cmdq_pkt_destroy(&cmd->pkt);
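
Both completion paths above drop one count per finished job; in each, only the final decrement performs the wake-up or job-finish work. Below is a minimal sketch of that completion side, using a hypothetical job_tracker type rather than the driver's structs. The behavioural difference from atomic_dec_and_test() is that refcount_dec_and_test() warns and saturates instead of wrapping if it would go below zero, so the decrements must stay balanced against the refcount_set() in the submit path.

#include <linux/refcount.h>
#include <linux/wait.h>

struct job_tracker {				/* hypothetical, for illustration only */
	refcount_t job_count;			/* outstanding jobs, re-armed per submit */
	wait_queue_head_t callback_wq;		/* waiters for job_count == 0 */
};

static void job_tracker_complete_one(struct job_tracker *t)
{
	/* The last finished job wakes anyone waiting for the device to go idle. */
	if (refcount_dec_and_test(&t->job_count))
		wake_up(&t->callback_wq);
}
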
 	int i, ret;
 	u8 pp_used = __get_pp_num(param->param->type);
-	atomic_set(&mdp->job_count, pp_used);
+	refcount_set(&mdp->job_count, pp_used);
 	if (atomic_read(&mdp->suspended)) {
-		atomic_set(&mdp->job_count, 0);
+		refcount_set(&mdp->job_count, 0);
 		return -ECANCELED;
 	}
 		mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
 				    cmd[i]->num_comps);
 err_cancel_job:
-	atomic_set(&mdp->job_count, 0);
+	refcount_set(&mdp->job_count, 0);
 	return ret;
 }
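
The submit path is the arming side: the counter is set to the number of pipelines in use before the work is queued, and dropped back to zero on the suspended and error paths so the next submit can re-arm it. refcount_set() performs no zero or saturation checks, which is what makes it suitable for this kind of (re)initialisation. Continuing the hypothetical job_tracker sketch above (pp_used and the queueing step stand in for the driver's details):

#include <linux/errno.h>

static int job_tracker_submit(struct job_tracker *t, unsigned int pp_used,
			      bool suspended)
{
	/* Arm the counter before any work is queued. */
	refcount_set(&t->job_count, pp_used);

	if (suspended) {
		/* Nothing was queued: disarm so waiters still see an idle device. */
		refcount_set(&t->job_count, 0);
		return -ECANCELED;
	}

	/*
	 * ... queue pp_used units of work here; each one ends in
	 * job_tracker_complete_one() ...
	 */
	return 0;
}
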
 	atomic_set(&mdp->suspended, 1);
-	if (atomic_read(&mdp->job_count)) {
+	if (refcount_read(&mdp->job_count)) {
 		ret = wait_event_timeout(mdp->callback_wq,
-					 !atomic_read(&mdp->job_count),
+					 !refcount_read(&mdp->job_count),
 					 2 * HZ);
 		if (ret == 0) {
 			dev_err(dev,
 				"%s:flushed cmdq task incomplete, count=%d\n",
-				__func__, atomic_read(&mdp->job_count));
+				__func__, refcount_read(&mdp->job_count));
 			return -EBUSY;
 		}
 	}
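
The suspend path marks the device suspended and then only has to wait for the counter to drain, bounded by the same 2 * HZ timeout as in the hunk above. refcount_read() returns an unsigned int, so the sketch below logs the leftover count with %u. Again built on the hypothetical job_tracker, not the driver's code:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int job_tracker_wait_idle(struct job_tracker *t, struct device *dev)
{
	int ret;

	/* Sleep until the last job_tracker_complete_one() wakes us, or time out. */
	ret = wait_event_timeout(t->callback_wq,
				 !refcount_read(&t->job_count),
				 2 * HZ);
	if (ret == 0) {
		dev_err(dev, "%u jobs still pending\n",
			refcount_read(&t->job_count));
		return -EBUSY;
	}

	return 0;
}
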
 	task.cb_data = NULL;
 	task.mdp_ctx = ctx;
-	if (atomic_read(&ctx->mdp_dev->job_count)) {
+	if (refcount_read(&ctx->mdp_dev->job_count)) {
 		ret = wait_event_timeout(ctx->mdp_dev->callback_wq,
-					 !atomic_read(&ctx->mdp_dev->job_count),
+					 !refcount_read(&ctx->mdp_dev->job_count),
 					 2 * HZ);
 		if (ret == 0) {
 			dev_err(&ctx->mdp_dev->pdev->dev,
 				"%d jobs not yet done\n",
-				atomic_read(&ctx->mdp_dev->job_count));
+				refcount_read(&ctx->mdp_dev->job_count));
 			goto worker_end;
 		}
 	}
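
The m2m run path above repeats the suspend-path wait almost verbatim, only reaching job_count through ctx->mdp_dev; both call sites map onto the job_tracker_wait_idle() sketch. Taken together, the conversion keeps one shape throughout: arm with refcount_set(), drop with refcount_dec_and_test(), and poll with refcount_read().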