 #include <linux/completion.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/hrtimer.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
 #include <linux/usb/role.h>
 #include <linux/usb/tcpm.h>
 #include <linux/usb/typec_altmode.h>
-#include <linux/workqueue.h>
 
 #define FOREACH_STATE(S)                       \
        S(INVALID_STATE),                       \
        struct device *dev;
 
        struct mutex lock;              /* tcpm state machine lock */
-       struct workqueue_struct *wq;
+       struct kthread_worker *wq;
 
        struct typec_capability typec_caps;
        struct typec_port *typec_port;
        enum tcpm_state prev_state;
        enum tcpm_state state;
        enum tcpm_state delayed_state;
-       unsigned long delayed_runtime;
+       ktime_t delayed_runtime;        /* absolute due time of delayed state */
        unsigned long delay_ms;
 
        spinlock_t pd_event_lock;
        u32 pd_events;
 
-       struct work_struct event_work;
-       struct delayed_work state_machine;
-       struct delayed_work vdm_state_machine;
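+       /*
+        * The event handler and both state machines run on a dedicated
+        * kthread worker; each hrtimer simply re-queues its work item
+        * once the requested delay has elapsed.
+        */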
+       struct kthread_work event_work;
+       struct hrtimer state_machine_timer;
+       struct kthread_work state_machine;
+       struct hrtimer vdm_state_machine_timer;
+       struct kthread_work vdm_state_machine;
        bool state_machine_running;
 
        struct completion tx_complete;
 };
 
 struct pd_rx_event {
-       struct work_struct work;
+       struct kthread_work work;
        struct tcpm_port *port;
        struct pd_message msg;
 };
        return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
 }
 
+/*
+ * Replacement for mod_delayed_work(): a non-zero delay arms the monotonic
+ * hrtimer, whose handler queues the state machine work on expiry; a zero
+ * delay cancels any pending timer and queues the work immediately.
+ */
+static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+       if (delay_ms) {
+               hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_cancel(&port->state_machine_timer);
+               kthread_queue_work(port->wq, &port->state_machine);
+       }
+}
+
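+/* As above, but for the VDM state machine. */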
+static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+       if (delay_ms) {
+               hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
+                             HRTIMER_MODE_REL);
+       } else {
+               hrtimer_cancel(&port->vdm_state_machine_timer);
+               kthread_queue_work(port->wq, &port->vdm_state_machine);
+       }
+}
+
 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
                           unsigned int delay_ms)
 {
                         tcpm_states[port->state], tcpm_states[state],
                         delay_ms);
                port->delayed_state = state;
-               mod_delayed_work(port->wq, &port->state_machine,
-                                msecs_to_jiffies(delay_ms));
-               port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
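+               /*
+                * Arm the timer and record the absolute due time, so an
+                * interrupted wait can be re-armed with the remaining delay.
+                */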
+               mod_tcpm_delayed_work(port, delay_ms);
+               port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
                port->delay_ms = delay_ms;
        } else {
                tcpm_log(port, "state change %s -> %s",
                 * machine.
                 */
                if (!port->state_machine_running)
-                       mod_delayed_work(port->wq, &port->state_machine, 0);
+                       mod_tcpm_delayed_work(port, 0);
        }
 }
 
                               enum pd_msg_request message)
 {
        port->queued_message = message;
-       mod_delayed_work(port->wq, &port->state_machine, 0);
+       mod_tcpm_delayed_work(port, 0);
 }
 
 /*
        port->vdm_retries = 0;
        port->vdm_state = VDM_STATE_READY;
 
-       mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+       mod_vdm_delayed_work(port, 0);
 }
 
 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
                        port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
                        port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
                                CMDT_INIT;
-                       mod_delayed_work(port->wq, &port->vdm_state_machine,
-                                        msecs_to_jiffies(PD_T_VDM_BUSY));
+                       mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
                        return;
                }
                port->vdm_state = VDM_STATE_DONE;
                        port->vdm_retries = 0;
                        port->vdm_state = VDM_STATE_BUSY;
                        timeout = vdm_ready_timeout(port->vdo_data[0]);
-                       mod_delayed_work(port->wq, &port->vdm_state_machine,
-                                        timeout);
+                       mod_vdm_delayed_work(port, timeout);
                }
                break;
        case VDM_STATE_WAIT_RSP_BUSY:
        }
 }
 
-static void vdm_state_machine_work(struct work_struct *work)
+static void vdm_state_machine_work(struct kthread_work *work)
 {
-       struct tcpm_port *port = container_of(work, struct tcpm_port,
-                                             vdm_state_machine.work);
+       struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
        enum vdm_states prev_state;
 
        mutex_lock(&port->lock);
        struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
 
        tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+
        return 0;
 }
 
        }
 }
 
-static void tcpm_pd_rx_handler(struct work_struct *work)
+static void tcpm_pd_rx_handler(struct kthread_work *work)
 {
        struct pd_rx_event *event = container_of(work,
                                                 struct pd_rx_event, work);
        if (!event)
                return;
 
-       INIT_WORK(&event->work, tcpm_pd_rx_handler);
+       kthread_init_work(&event->work, tcpm_pd_rx_handler);
        event->port = port;
        memcpy(&event->msg, msg, sizeof(*msg));
-       queue_work(port->wq, &event->work);
+       kthread_queue_work(port->wq, &event->work);
 }
 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
 
        } while (port->queued_message != PD_MSG_NONE);
 
        if (port->delayed_state != INVALID_STATE) {
-               if (time_is_after_jiffies(port->delayed_runtime)) {
-                       mod_delayed_work(port->wq, &port->state_machine,
-                                        port->delayed_runtime - jiffies);
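+               /* Not due yet: re-arm the timer with the remaining time */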
+               if (ktime_after(port->delayed_runtime, ktime_get())) {
+                       mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
+                                                                         ktime_get())));
                        return true;
                }
                port->delayed_state = INVALID_STATE;
        case SNK_DISCOVERY_DEBOUNCE_DONE:
                if (!tcpm_port_is_disconnected(port) &&
                    tcpm_port_is_sink(port) &&
-                   time_is_after_jiffies(port->delayed_runtime)) {
+                   ktime_after(port->delayed_runtime, ktime_get())) {
                        tcpm_set_state(port, SNK_DISCOVERY,
-                                      jiffies_to_msecs(port->delayed_runtime -
-                                                       jiffies));
+                                      ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
                        break;
                }
                tcpm_set_state(port, unattached_state(port), 0);
        }
 }
 
-static void tcpm_state_machine_work(struct work_struct *work)
+static void tcpm_state_machine_work(struct kthread_work *work)
 {
-       struct tcpm_port *port = container_of(work, struct tcpm_port,
-                                             state_machine.work);
+       struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
        enum tcpm_state prev_state;
 
        mutex_lock(&port->lock);
                       0);
 }
 
-static void tcpm_pd_event_handler(struct work_struct *work)
+static void tcpm_pd_event_handler(struct kthread_work *work)
 {
        struct tcpm_port *port = container_of(work, struct tcpm_port,
                                              event_work);
        spin_lock(&port->pd_event_lock);
        port->pd_events |= TCPM_CC_EVENT;
        spin_unlock(&port->pd_event_lock);
-       queue_work(port->wq, &port->event_work);
+       kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_cc_change);
 
        spin_lock(&port->pd_event_lock);
        port->pd_events |= TCPM_VBUS_EVENT;
        spin_unlock(&port->pd_event_lock);
-       queue_work(port->wq, &port->event_work);
+       kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
 
        spin_lock(&port->pd_event_lock);
        port->pd_events = TCPM_RESET_EVENT;
        spin_unlock(&port->pd_event_lock);
-       queue_work(port->wq, &port->event_work);
+       kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
 
        return PTR_ERR_OR_ZERO(port->psy);
 }
 
+/*
+ * hrtimer callbacks run in hard interrupt context, so they only hand the
+ * expired state machine off to the kthread worker.
+ */
+static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
+{
+       struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
+
+       kthread_queue_work(port->wq, &port->state_machine);
+       return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
+{
+       struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
+
+       kthread_queue_work(port->wq, &port->vdm_state_machine);
+       return HRTIMER_NORESTART;
+}
+
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 {
        struct tcpm_port *port;
        mutex_init(&port->lock);
        mutex_init(&port->swap_lock);
 
-       port->wq = create_singlethread_workqueue(dev_name(dev));
-       if (!port->wq)
-               return ERR_PTR(-ENOMEM);
-       INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
-       INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
-       INIT_WORK(&port->event_work, tcpm_pd_event_handler);
+       port->wq = kthread_create_worker(0, "%s", dev_name(dev));
+       if (IS_ERR(port->wq))
+               return ERR_CAST(port->wq);
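+       /*
+        * USB PD response deadlines are tight; give the worker real-time
+        * (FIFO) priority so event handling is not starved under load.
+        */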
+       sched_set_fifo(port->wq->task);
+
+       kthread_init_work(&port->state_machine, tcpm_state_machine_work);
+       kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
+       kthread_init_work(&port->event_work, tcpm_pd_event_handler);
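+       /* One-shot timers that defer each state machine by a requested delay */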
+       hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       port->state_machine_timer.function = state_machine_timer_handler;
+       hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
 
        spin_lock_init(&port->pd_event_lock);
 
        usb_role_switch_put(port->role_sw);
 out_destroy_wq:
        tcpm_debugfs_exit(port);
-       destroy_workqueue(port->wq);
+       kthread_destroy_worker(port->wq);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(tcpm_register_port);
        typec_unregister_port(port->typec_port);
        usb_role_switch_put(port->role_sw);
        tcpm_debugfs_exit(port);
-       destroy_workqueue(port->wq);
+       /* Stop the timers before the worker so no work is queued afterwards */
+       hrtimer_cancel(&port->vdm_state_machine_timer);
+       hrtimer_cancel(&port->state_machine_timer);
+       kthread_destroy_worker(port->wq);
 }
 EXPORT_SYMBOL_GPL(tcpm_unregister_port);