* Can't use generic function to check this because of special case
         * where we create a CB as part of the reset process
         */
-       if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
+       if ((hdev->disabled) || ((atomic_read(&hdev->reset_info.in_reset)) &&
                                        (ctx_id != HL_KERNEL_ASID_ID))) {
                dev_warn_ratelimited(hdev->dev,
                        "Device is disabled or in reset. Can't create new CBs\n");
 
                if (hdev->reset_on_lockup)
                        hl_device_reset(hdev, HL_DRV_RESET_TDR);
                else
-                       hdev->needs_reset = true;
+                       hdev->reset_info.needs_reset = true;
        }
 }
 
        cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
        cs->timeout_jiffies = timeout;
        cs->skip_reset_on_timeout =
-               hdev->skip_reset_on_timeout ||
+               hdev->reset_info.skip_reset_on_timeout ||
                !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
        cs->submission_time_jiffies = jiffies;
        INIT_LIST_HEAD(&cs->job_list);
 
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't check device idle during reset\n");
                return 0;
        ssize_t rc;
        u32 val;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
                return 0;
        }
        u32 value;
        ssize_t rc;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
                return 0;
        }
        ssize_t rc;
        u64 val;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
                return 0;
        }
        u64 value;
        ssize_t rc;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
                return 0;
        }
        ssize_t rc;
        u32 size;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
                return 0;
        }
        u64 value;
        ssize_t rc;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't change clock gating during reset\n");
                return 0;
        u32 value;
        ssize_t rc;
 
-       if (atomic_read(&hdev->in_reset)) {
+       if (atomic_read(&hdev->reset_info.in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't change stop on error during reset\n");
                return 0;
        debugfs_create_x8("skip_reset_on_timeout",
                                0644,
                                dev_entry->root,
-                               &hdev->skip_reset_on_timeout);
+                               &hdev->reset_info.skip_reset_on_timeout);
 
        debugfs_create_file("state_dump",
                                0600,
 
 {
        enum hl_device_status status;
 
-       if (atomic_read(&hdev->in_reset))
+       if (atomic_read(&hdev->reset_info.in_reset))
                status = HL_DEVICE_STATUS_IN_RESET;
-       else if (hdev->needs_reset)
+       else if (hdev->reset_info.needs_reset)
                status = HL_DEVICE_STATUS_NEEDS_RESET;
        else if (hdev->disabled)
                status = HL_DEVICE_STATUS_MALFUNCTION;
        INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
        mutex_init(&hdev->fpriv_list_lock);
        mutex_init(&hdev->fpriv_ctrl_list_lock);
-       atomic_set(&hdev->in_reset, 0);
+       atomic_set(&hdev->reset_info.in_reset, 0);
        mutex_init(&hdev->clk_throttling.lock);
 
        return 0;
         * status for at least one heartbeat. From this point driver restarts
         * tracking future consecutive fatal errors.
         */
-       if (!(atomic_read(&hdev->in_reset)))
-               hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+       if (!(atomic_read(&hdev->reset_info.in_reset)))
+               hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
 
        schedule_delayed_work(&hdev->work_heartbeat,
                        usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
                        goto out;
                }
 
-               if (!hdev->hard_reset_pending)
+               if (!hdev->reset_info.hard_reset_pending)
                        hdev->asic_funcs->halt_coresight(hdev, ctx);
 
                hdev->in_debug = 0;
 
-               if (!hdev->hard_reset_pending)
+               if (!hdev->reset_info.hard_reset_pending)
                        hdev->asic_funcs->set_clock_gating(hdev);
 
                goto out;
        pci_save_state(hdev->pdev);
 
        /* Block future CS/VM/JOB completion operations */
-       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       rc = atomic_cmpxchg(&hdev->reset_info.in_reset, 0, 1);
        if (rc) {
                dev_err(hdev->dev, "Can't suspend while in reset\n");
                return -EIO;
 
 
        hdev->disabled = false;
-       atomic_set(&hdev->in_reset, 0);
+       atomic_set(&hdev->reset_info.in_reset, 0);
 
        rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
        if (rc) {
         * 'reset_cause' will continue holding its 1st recorded reason!
         */
        if (flags & HL_DRV_RESET_HEARTBEAT) {
-               hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+               hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
                cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
        } else if (flags & HL_DRV_RESET_TDR) {
-               hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
+               hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
                cur_reset_trigger = HL_DRV_RESET_TDR;
        } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
-               hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+               hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
                cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
        } else {
-               hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+               hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
        }
 
        /*
         * is set and if this reset is due to a fatal FW error
         * device is set to an unstable state.
         */
-       if (hdev->prev_reset_trigger != cur_reset_trigger) {
-               hdev->prev_reset_trigger = cur_reset_trigger;
-               hdev->reset_trigger_repeated = 0;
+       if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
+               hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
+               hdev->reset_info.reset_trigger_repeated = 0;
        } else {
-               hdev->reset_trigger_repeated = 1;
+               hdev->reset_info.reset_trigger_repeated = 1;
        }
 
        /* If reset is due to heartbeat, device CPU is no responsive in
        from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
        fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
 
-       if (!hard_reset && !hdev->supports_soft_reset) {
+       if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
                hard_instead_soft = true;
                hard_reset = true;
        }
                goto do_reset;
        }
 
-       if (!hard_reset && !hdev->allow_inference_soft_reset) {
+       if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
                hard_instead_soft = true;
                hard_reset = true;
        }
         */
        if (!from_hard_reset_thread) {
                /* Block future CS/VM/JOB completion operations */
-               rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+               rc = atomic_cmpxchg(&hdev->reset_info.in_reset, 0, 1);
                if (rc)
                        return 0;
 
                handle_reset_trigger(hdev, flags);
 
-               hdev->is_in_soft_reset = !hard_reset;
+               /* This still allows the completion of some KDMA ops */
+               hdev->reset_info.is_in_soft_reset = !hard_reset;
 
                /* This also blocks future CS/VM/JOB completion operations */
                hdev->disabled = true;
 
 again:
        if ((hard_reset) && (!from_hard_reset_thread)) {
-               hdev->hard_reset_pending = true;
+               hdev->reset_info.hard_reset_pending = true;
 
                hdev->process_kill_trial_cnt = 0;
 
 
        if (hard_reset) {
                hdev->device_cpu_disabled = false;
-               hdev->hard_reset_pending = false;
+               hdev->reset_info.hard_reset_pending = false;
 
-               if (hdev->reset_trigger_repeated &&
-                               (hdev->prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR)) {
+               if (hdev->reset_info.reset_trigger_repeated &&
+                               (hdev->reset_info.prev_reset_trigger ==
+                                               HL_DRV_RESET_FW_FATAL_ERR)) {
                        /* if there 2 back to back resets from FW,
                         * ensure driver puts the driver in a unusable state
                         */
         * is required for the initialization itself
         */
        hdev->disabled = false;
-       hdev->is_in_soft_reset = false;
+       hdev->reset_info.is_in_soft_reset = false;
 
        rc = hdev->asic_funcs->hw_init(hdev);
        if (rc) {
                }
        }
 
-       atomic_set(&hdev->in_reset, 0);
-       hdev->needs_reset = false;
+       atomic_set(&hdev->reset_info.in_reset, 0);
+       hdev->reset_info.needs_reset = false;
 
        dev_notice(hdev->dev, "Successfully finished resetting the device\n");
 
        if (hard_reset) {
-               hdev->hard_reset_cnt++;
+               hdev->reset_info.hard_reset_cnt++;
 
                /* After reset is done, we are ready to receive events from
                 * the F/W. We can't do it before because we will ignore events
                 */
                hdev->asic_funcs->enable_events_from_fw(hdev);
        } else if (!reset_upon_device_release) {
-               hdev->soft_reset_cnt++;
+               hdev->reset_info.soft_reset_cnt++;
        }
 
        return 0;
 
 out_err:
        hdev->disabled = true;
-       hdev->is_in_soft_reset = false;
+       hdev->reset_info.is_in_soft_reset = false;
 
        if (hard_reset) {
                dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
-               hdev->hard_reset_cnt++;
+               hdev->reset_info.hard_reset_cnt++;
        } else if (reset_upon_device_release) {
                dev_err(hdev->dev, "Failed to reset device after user release\n");
                hard_reset = true;
                goto again;
        } else {
                dev_err(hdev->dev, "Failed to do soft-reset\n");
-               hdev->soft_reset_cnt++;
+               hdev->reset_info.soft_reset_cnt++;
                hard_reset = true;
                goto again;
        }
 
-       atomic_set(&hdev->in_reset, 0);
+       atomic_set(&hdev->reset_info.in_reset, 0);
 
        return rc;
 }
         */
 
        timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
-       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       rc = atomic_cmpxchg(&hdev->reset_info.in_reset, 0, 1);
        while (rc) {
                usleep_range(50, 200);
-               rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+               rc = atomic_cmpxchg(&hdev->reset_info.in_reset, 0, 1);
                if (ktime_compare(ktime_get(), timeout) > 0) {
                        dev_crit(hdev->dev,
                                "Failed to remove device because reset function did not finish\n");
 
        take_release_locks(hdev);
 
-       hdev->hard_reset_pending = true;
+       hdev->reset_info.hard_reset_pending = true;
 
        hl_hwmon_fini(hdev);
 
 
        if (rc)
                goto protocol_err;
 
-       if (hdev->curr_reset_cause) {
+       if (hdev->reset_info.curr_reset_cause) {
                rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
-                               HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause);
+                               HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
                if (rc)
                        goto protocol_err;
 
                /* Clear current reset cause */
-               hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+               hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
        }
 
        if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
 
  *                         false otherwise.
  * @use_get_power_for_reset_history: To support backward compatibility for Goya
  *                                   and Gaudi
+ * @supports_soft_reset: is soft reset supported.
+ * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
+ *                              initiated by user or TDR. This is only true
+ *                              in inference ASICs, as there is no real-world
+ *                              use-case of doing soft-reset in training (due
+ *                              to the fact that training runs on multiple
+ *                              devices)
  */
 struct asic_fixed_properties {
        struct hw_queue_properties      *hw_queues_props;
        u8                              dynamic_fw_load;
        u8                              gic_interrupts_enable;
        u8                              use_get_power_for_reset_history;
+       u8                              supports_soft_reset;
+       u8                              allow_inference_soft_reset;
 };
 
 /**
        u8              razwi_type;
 };
 
+/**
+ * struct hl_reset_info - holds current device reset information.
+ * @in_reset: is device in reset flow.
+ * @soft_reset_cnt: number of soft reset since the driver was loaded.
+ * @hard_reset_cnt: number of hard reset since the driver was loaded.
+ * @is_in_soft_reset: Device is currently in soft reset process.
+ * @needs_reset: true if reset_on_lockup is false and device should be reset
+ *               due to lockup.
+ * @hard_reset_pending: is there a hard reset work pending.
+ * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
+ *                    triggered, and cleared after it is shared with preboot.
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
+ *                      with a new value on next reset
+ * @reset_trigger_repeated: set if device reset is triggered more than once with
+ *                          same cause.
+ * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
+ *                         complete instead.
+ */
+struct hl_reset_info {
+       atomic_t        in_reset;
+       u32             soft_reset_cnt;
+       u32             hard_reset_cnt;
+       u8              is_in_soft_reset;
+       u8              needs_reset;
+       u8              hard_reset_pending;
+
+       u8              curr_reset_cause;
+       u8              prev_reset_trigger;
+       u8              reset_trigger_repeated;
+
+       u8              skip_reset_on_timeout;
+};
+
 /**
  * struct hl_device - habanalabs device structure.
  * @pdev: pointer to PCI device, can be NULL in case of simulator device.
  * @state_dump_specs: constants and dictionaries needed to dump system state.
  * @multi_cs_completion: array of multi-CS completion.
  * @clk_throttling: holds information about current/previous clock throttling events
+ * @reset_info: holds current device reset information.
  * @last_error: holds information about last session in which CS timeout or razwi error occurred.
  * @stream_master_qid_arr: pointer to array with QIDs of master streams.
  * @dram_used_mem: current DRAM memory consumption.
  *                                  session.
  * @open_counter: number of successful device open operations.
  * @fw_poll_interval_usec: FW status poll interval in usec.
- * @in_reset: is device in reset flow.
  * @card_type: Various ASICs have several card types. This indicates the card
  *             type of the current device.
  * @major: habanalabs kernel driver major.
  * @high_pll: high PLL profile frequency.
- * @soft_reset_cnt: number of soft reset since the driver was loaded.
- * @hard_reset_cnt: number of hard reset since the driver was loaded.
  * @id: device minor.
  * @id_control: minor of the control device
  * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
  * @disabled: is device disabled.
  * @late_init_done: is late init stage was done during initialization.
  * @hwmon_initialized: is H/W monitor sensors was initialized.
- * @hard_reset_pending: is there a hard reset work pending.
  * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
  * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
  *                   otherwise.
  * @sync_stream_queue_idx: helper index for sync stream queues initialization.
  * @collective_mon_idx: helper index for collective initialization
  * @supports_coresight: is CoreSight supported.
- * @supports_soft_reset: is soft reset supported.
- * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
- *                              initiated by user or TDR. This is only true
- *                              in inference ASICs, as there is no real-world
- *                              use-case of doing soft-reset in training (due
- *                              to the fact that training runs on multiple
- *                              devices)
  * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
- * @needs_reset: true if reset_on_lockup is false and device should be reset
- *               due to lockup.
  * @process_kill_trial_cnt: number of trials reset thread tried killing
  *                          user processes
  * @device_fini_pending: true if device_fini was called and might be
  *                       waiting for the reset thread to finish
  * @supports_staged_submission: true if staged submissions are supported
- * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
- *                    triggered, and cleared after it is shared with preboot.
- * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
- *                      with a new value on next reset
- * @reset_trigger_repeated: set if device reset is triggered more than once with
- *                          same cause.
- * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
- *                         complete instead.
  * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
  *                        halted. We can't halt it again because the COMMS
  *                        protocol will throw an error. Relevant only for
  *                        cases where Linux was not loaded to device CPU
  * @supports_wait_for_multi_cs: true if wait for multi CS is supported
- * @is_in_soft_reset: Device is currently in soft reset process.
  * @is_compute_ctx_active: Whether there is an active compute context executing.
  */
 struct hl_device {
        struct hl_clk_throttle          clk_throttling;
        struct last_error_session_info  last_error;
 
+       struct hl_reset_info            reset_info;
+
        u32                             *stream_master_qid_arr;
        atomic64_t                      dram_used_mem;
        u64                             timeout_jiffies;
        u64                             last_open_session_duration_jif;
        u64                             open_counter;
        u64                             fw_poll_interval_usec;
-       atomic_t                        in_reset;
        ktime_t                         last_successful_open_ktime;
        enum cpucp_card_types           card_type;
        u32                             major;
        u32                             high_pll;
-       u32                             soft_reset_cnt;
-       u32                             hard_reset_cnt;
        u16                             id;
        u16                             id_control;
        u16                             cpu_pci_msb_addr;
        u8                              disabled;
        u8                              late_init_done;
        u8                              hwmon_initialized;
-       u8                              hard_reset_pending;
        u8                              heartbeat;
        u8                              reset_on_lockup;
        u8                              dram_default_page_mapping;
        u8                              sync_stream_queue_idx;
        u8                              collective_mon_idx;
        u8                              supports_coresight;
-       u8                              supports_soft_reset;
-       u8                              allow_inference_soft_reset;
        u8                              supports_cb_mapping;
-       u8                              needs_reset;
        u8                              process_kill_trial_cnt;
        u8                              device_fini_pending;
        u8                              supports_staged_submission;
-       u8                              curr_reset_cause;
-       u8                              prev_reset_trigger;
-       u8                              reset_trigger_repeated;
-       u8                              skip_reset_on_timeout;
        u8                              device_cpu_is_halted;
        u8                              supports_wait_for_multi_cs;
        u8                              stream_master_qid_arr_size;
-       u8                              is_in_soft_reset;
        u8                              is_compute_ctx_active;
 
        /* Parameters for bring-up */
 
        hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
 
        hdev->stop_on_err = true;
-       hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
-       hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+       hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+       hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
 
        /* Enable only after the initialization of the device */
        hdev->disabled = true;
 
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
-       reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+       reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
+       reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
 
        return copy_to_user(out, &reset_count,
                min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
 
                 */
                dma_rmb();
 
-               if (hdev->disabled && !hdev->is_in_soft_reset) {
+               if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
                        dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
                        goto skip_irq;
                }
 
         * Clearly something went wrong on hard reset so no point in printing
         * another side effect error
         */
-       if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+       if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
                dev_dbg(hdev->dev,
                        "user released device without removing its memory mappings\n");
 
 
                goto out;
        }
 
-       if (!hdev->allow_inference_soft_reset) {
+       if (!hdev->asic_prop.allow_inference_soft_reset) {
                dev_err(hdev->dev, "Device does not support inference soft-reset\n");
                goto out;
        }
 {
        struct hl_device *hdev = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
+       return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt);
 }
 
 static ssize_t hard_reset_cnt_show(struct device *dev,
 {
        struct hl_device *hdev = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
+       return sprintf(buf, "%d\n", hdev->reset_info.hard_reset_cnt);
 }
 
 static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
                return rc;
        }
 
-       if (!hdev->allow_inference_soft_reset)
+       if (!hdev->asic_prop.allow_inference_soft_reset)
                return 0;
 
        rc = device_add_groups(hdev->dev, hl_dev_inference_attr_groups);
 {
        device_remove_groups(hdev->dev, hl_dev_attr_groups);
 
-       if (!hdev->allow_inference_soft_reset)
+       if (!hdev->asic_prop.allow_inference_soft_reset)
                return;
 
        device_remove_groups(hdev->dev, hl_dev_inference_attr_groups);
 
                 * In case watchdog hasn't expired but we still got HB, then this won't do any
                 * damage.
                 */
-               if (hdev->curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
+               if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
                        if (hdev->asic_prop.hard_reset_done_by_fw)
                                hl_fw_ask_hard_reset_without_linux(hdev);
                        else
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
 
-       if (hdev->hard_reset_pending)
+       if (hdev->reset_info.hard_reset_pending)
                return U64_MAX;
 
        return readq(hdev->pcie_bar[HBM_BAR_ID] +
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
 
-       if (hdev->hard_reset_pending)
+       if (hdev->reset_info.hard_reset_pending)
                return;
 
        writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
        int rc;
 
        if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
-               hdev->hard_reset_pending)
+               hdev->reset_info.hard_reset_pending)
                return 0;
 
        if (hdev->pldm)
 
 
        spin_lock_init(&goya->hw_queues_lock);
        hdev->supports_coresight = true;
-       hdev->supports_soft_reset = true;
-       hdev->allow_inference_soft_reset = true;
+       hdev->asic_prop.supports_soft_reset = true;
+       hdev->asic_prop.allow_inference_soft_reset = true;
        hdev->supports_wait_for_multi_cs = false;
 
        hdev->asic_funcs->set_pci_memory_regions(hdev);
 {
        struct goya_device *goya = hdev->asic_specific;
 
-       if (hdev->hard_reset_pending)
+       if (hdev->reset_info.hard_reset_pending)
                return U64_MAX;
 
        return readq(hdev->pcie_bar[DDR_BAR_ID] +
 {
        struct goya_device *goya = hdev->asic_specific;
 
-       if (hdev->hard_reset_pending)
+       if (hdev->reset_info.hard_reset_pending)
                return;
 
        writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
        int rc;
 
        if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
-               hdev->hard_reset_pending)
+               hdev->reset_info.hard_reset_pending)
                return 0;
 
        /* no need in L1 only invalidation in Goya */