spin_unlock_irqrestore(&health->wq_lock, flags);
 }
 
+#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
+/* Push the host wall-clock time (in usec) to the device MRTC register so
+ * firmware log timestamps can be correlated with host time, then re-arm
+ * to run again in one hour on the health workqueue.
+ */
+static void mlx5_health_log_ts_update(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       u32 out[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+       u32 in[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+       struct mlx5_core_health *health;
+       struct mlx5_core_dev *dev;
+       struct mlx5_priv *priv;
+       u64 now_us;
+
+       health = container_of(dwork, struct mlx5_core_health,
+                             update_fw_log_ts_work);
+       priv = container_of(health, struct mlx5_priv, health);
+       dev = container_of(priv, struct mlx5_core_dev, priv);
+
+       now_us = ktime_to_us(ktime_get_real());
+
+       /* Split the 64-bit usec timestamp across the two 32-bit fields. */
+       MLX5_SET(mrtc_reg, in, time_h, now_us >> 32);
+       MLX5_SET(mrtc_reg, in, time_l, now_us & 0xFFFFFFFF);
+       /* Best effort: a failed write is simply retried on the next tick. */
+       mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+                            MLX5_REG_MRTC, 0, 1);
+
+       queue_delayed_work(health->wq, &health->update_fw_log_ts_work,
+                          msecs_to_jiffies(MLX5_MSEC_PER_HOUR));
+}
+
 static void poll_health(struct timer_list *t)
 {
        struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
        spin_lock_irqsave(&health->wq_lock, flags);
        set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
        spin_unlock_irqrestore(&health->wq_lock, flags);
+       cancel_delayed_work_sync(&health->update_fw_log_ts_work);
        cancel_work_sync(&health->report_work);
        cancel_work_sync(&health->fatal_report_work);
 }
 {
        struct mlx5_core_health *health = &dev->priv.health;
 
+       cancel_delayed_work_sync(&health->update_fw_log_ts_work);
        destroy_workqueue(health->wq);
        mlx5_fw_reporters_destroy(dev);
 }
        spin_lock_init(&health->wq_lock);
        INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
        INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
+       INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
+       if (mlx5_core_is_pf(dev))
+               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 
        return 0;
 
 
        MLX5_REG_MCIA            = 0x9014,
        MLX5_REG_MFRL            = 0x9028,
        MLX5_REG_MLCR            = 0x902b,
+       MLX5_REG_MRTC            = 0x902d,
        MLX5_REG_MTRC_CAP        = 0x9040,
        MLX5_REG_MTRC_CONF       = 0x9041,
        MLX5_REG_MTRC_STDB       = 0x9042,
        struct work_struct              report_work;
        struct devlink_health_reporter *fw_reporter;
        struct devlink_health_reporter *fw_fatal_reporter;
+       struct delayed_work             update_fw_log_ts_work;
 };
 
 struct mlx5_qp_table {
 
        union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
 };
 
+/* Bit layout of the MRTC (real-time clock) access register.
+ * time_h/time_l carry the upper/lower 32 bits of a host timestamp in
+ * microseconds, as written by mlx5_health_log_ts_update().
+ * NOTE(review): time_synced semantics inferred from the field name -
+ * confirm against the device PRM before relying on it.
+ */
+struct mlx5_ifc_mrtc_reg_bits {
+       u8         time_synced[0x1];
+       u8         reserved_at_1[0x1f];
+
+       u8         reserved_at_20[0x20];
+
+       u8         time_h[0x20];
+
+       u8         time_l[0x20];
+};
+
 union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
        struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
        struct mlx5_ifc_mirc_reg_bits mirc_reg;
        struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
        struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+       /* MRTC register layout, used for FW log timestamp synchronization. */
+       struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
        u8         reserved_at_0[0x60e0];
 };