ipmi: kill off 'timespec' usage again
author		Arnd Bergmann <arnd@arndb.de>
		Fri, 8 Nov 2019 20:34:27 +0000 (21:34 +0100)
committer	Corey Minyard <cminyard@mvista.com>
		Fri, 8 Nov 2019 22:05:14 +0000 (16:05 -0600)
'struct timespec' is getting removed from the kernel. The usage in ipmi
was fixed before in commit 48862ea2ce86 ("ipmi: Update timespec usage
to timespec64"), but unfortunately it crept back in.

The busy-looping code is better served by ktime_t anyway, so switch it
over there, which also simplifies the implementation.
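
For reference, the conversion boils down to tracking the deadline as a
single signed 64-bit nanosecond count (which is what ktime_t is) instead
of a two-field timespec. Below is a minimal userspace sketch of just the
arm/expire part of that pattern, using clock_gettime(CLOCK_MONOTONIC) in
place of ktime_get(); the names now_ns(), busy_wait_check(), NOT_BUSY and
the 100us budget are illustrative only and not taken from this patch.

  /*
   * Sketch only: models the ktime_t deadline arming/expiry in plain
   * userspace C.  now_ns(), busy_wait_check(), NOT_BUSY and the 100us
   * budget below are illustrative, not part of the kernel change.
   */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <time.h>

  #define NSEC_PER_SEC  1000000000LL
  #define NSEC_PER_USEC 1000LL
  #define NOT_BUSY      ((int64_t)-1)	/* mirrors IPMI_TIME_NOT_BUSY */

  static int64_t now_ns(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
  }

  /* True while we should keep spinning, false once the budget expires. */
  static bool busy_wait_check(int64_t *busy_until, int64_t max_busy_us)
  {
          if (*busy_until == NOT_BUSY) {
                  /* first pass: arm the deadline */
                  *busy_until = now_ns() + max_busy_us * NSEC_PER_USEC;
          } else if (now_ns() > *busy_until) {
                  /* budget used up: disarm and stop spinning */
                  *busy_until = NOT_BUSY;
                  return false;
          }
          return true;
  }

  int main(void)
  {
          int64_t busy_until = NOT_BUSY;
          unsigned long spins = 0;

          while (busy_wait_check(&busy_until, 100))	/* 100us budget */
                  spins++;

          printf("spun %lu times before the deadline expired\n", spins);
          return 0;
  }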

Fixes: cbb19cb1eef0 ("ipmi_si: Convert timespec64 to timespec")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Message-Id: <20191108203435.112759-5-arnd@arndb.de>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6b9a0593d2eb75efdbef280a4597efe72b0ea776..c7cc8538b84aec726b3483197636e4e38e3ff4ae 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -265,10 +265,10 @@ static void cleanup_ipmi_si(void);
 #ifdef DEBUG_TIMING
 void debug_timestamp(char *msg)
 {
-       struct timespec t;
+       struct timespec64 t;
 
-       ktime_get_ts(&t);
-       pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
+       ktime_get_ts64(&t);
+       pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
 }
 #else
 #define debug_timestamp(x)
@@ -935,38 +935,25 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 }
 
 /*
- * Use -1 in the nsec value of the busy waiting timespec to tell that
- * we are spinning in kipmid looking for something and not delaying
- * between checks
+ * Use -1 as a special constant to tell that we are spinning in kipmid
+ * looking for something and not delaying between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
-{
-       ts->tv_nsec = -1;
-}
-static inline int ipmi_si_is_busy(struct timespec *ts)
-{
-       return ts->tv_nsec != -1;
-}
-
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
 static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
                                         const struct smi_info *smi_info,
-                                        struct timespec *busy_until)
+                                        ktime_t *busy_until)
 {
        unsigned int max_busy_us = 0;
 
        if (smi_info->si_num < num_max_busy_us)
                max_busy_us = kipmid_max_busy_us[smi_info->si_num];
        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
-               ipmi_si_set_not_busy(busy_until);
-       else if (!ipmi_si_is_busy(busy_until)) {
-               ktime_get_ts(busy_until);
-               timespec_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
+               *busy_until = IPMI_TIME_NOT_BUSY;
+       else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+               *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
        } else {
-               struct timespec now;
-
-               ktime_get_ts(&now);
-               if (unlikely(timespec_compare(&now, busy_until) > 0)) {
-                       ipmi_si_set_not_busy(busy_until);
+               if (unlikely(ktime_get() > *busy_until)) {
+                       *busy_until = IPMI_TIME_NOT_BUSY;
                        return false;
                }
        }
@@ -988,9 +975,8 @@ static int ipmi_thread(void *data)
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;
-       struct timespec busy_until = { 0, 0 };
+       ktime_t busy_until = IPMI_TIME_NOT_BUSY;
 
-       ipmi_si_set_not_busy(&busy_until);
        set_user_nice(current, MAX_NICE);
        while (!kthread_should_stop()) {
                int busy_wait;