goto out;
        }
 
+       wl->fw_mem_block_size = 256;
+       wl->fwlog_end = 0x2000000;
+
        /* common settings */
        wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
        wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
        return true;
 }
 
+static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+       return hwaddr << 5;
+}
+
 static int wl12xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl12xx_ops = {
        .channel_switch         = wl12xx_cmd_channel_switch,
        .pre_pkt_send           = NULL,
        .set_peer_cap           = wl12xx_set_peer_cap,
+       .convert_hwaddr         = wl12xx_convert_hwaddr,
        .lnk_high_prio          = wl12xx_lnk_high_prio,
        .lnk_low_prio           = wl12xx_lnk_low_prio,
 };
 
                goto out;
        }
 
+       wl->fw_mem_block_size = 272;
+       wl->fwlog_end = 0x40000000;
+
        wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
        wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
        wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
        return lnk->allocated_pkts < thold;
 }
 
+static u32 wl18xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+       return hwaddr & ~0x80000000;
+}
+
 static int wl18xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl18xx_ops = {
        .pre_pkt_send   = wl18xx_pre_pkt_send,
        .sta_rc_update  = wl18xx_sta_rc_update,
        .set_peer_cap   = wl18xx_set_peer_cap,
+       .convert_hwaddr = wl18xx_convert_hwaddr,
        .lnk_high_prio  = wl18xx_lnk_high_prio,
        .lnk_low_prio   = wl18xx_lnk_low_prio,
 };
 
 
/*
 * Copy up to maxlen bytes of FW log data from memblock into the
 * driver's fwlog buffer (a single PAGE_SIZE page exposed via sysfs).
 * Returns the number of bytes actually copied, which may be less than
 * maxlen when the page is nearly full.
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	/* Space remaining in the PAGE_SIZE-backed sysfs fwlog buffer */
	size_t room = (size_t)(PAGE_SIZE - wl->fwlog_size);
	size_t len = (maxlen < room) ? maxlen : room;

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);

	return len;
}
 
-#define WLCORE_FW_LOG_END 0x2000000
-
 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
 {
+       struct wlcore_partition_set part, old_part;
        u32 addr;
        u32 offset;
        u32 end_of_log;
 
        wl1271_info("Reading FW panic log");
 
-       block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
+       block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
        if (!block)
                return;
 
 
        if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
                offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
-               end_of_log = WLCORE_FW_LOG_END;
+               end_of_log = wl->fwlog_end;
        } else {
                offset = sizeof(addr);
                end_of_log = addr;
        }
 
+       old_part = wl->curr_part;
+       memset(&part, 0, sizeof(part));
+
        /* Traverse the memory blocks linked list */
        do {
-               memset(block, 0, WL12XX_HW_BLOCK_SIZE);
-               ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
-                                        false);
+               part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
+               part.mem.size  = PAGE_SIZE;
+
+               ret = wlcore_set_partition(wl, &part);
+               if (ret < 0) {
+                       wl1271_error("%s: set_partition start=0x%X size=%d",
+                               __func__, part.mem.start, part.mem.size);
+                       goto out;
+               }
+
+               memset(block, 0, wl->fw_mem_block_size);
+               ret = wlcore_read_hwaddr(wl, addr, block,
+                                       wl->fw_mem_block_size, false);
+
                if (ret < 0)
                        goto out;
 
                 * on demand mode and is equal to 0x2000000 in continuous mode.
                 */
                addr = le32_to_cpup((__le32 *)block);
+
                if (!wl12xx_copy_fwlog(wl, block + offset,
-                                      WL12XX_HW_BLOCK_SIZE - offset))
+                                       wl->fw_mem_block_size - offset))
                        break;
        } while (addr && (addr != end_of_log));
 
 
 out:
        kfree(block);
+       wlcore_set_partition(wl, &old_part);
 }
 
 static void wlcore_print_recovery(struct wl1271 *wl)
 
                            struct ieee80211_sta_ht_cap *ht_cap,
                            bool allow_ht_operation,
                            u32 rate_set, u8 hlid);
+       u32 (*convert_hwaddr)(struct wl1271 *wl, u32 hwaddr);
        bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
                              struct wl1271_link *lnk);
        bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
        /* Number of valid bytes in the FW log buffer */
        ssize_t fwlog_size;
 
+       /* FW log end marker */
+       u32 fwlog_end;
+
+       /* FW memory block size */
+       u32 fw_mem_block_size;
+
        /* Sysfs FW log entry readers wait queue */
        wait_queue_head_t fwlog_waitq;