return ret;
}
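+/*
+ * Derive the host controller IP version from the local UniPro version:
+ * controllers implementing UniPro 1.8 or above are treated as IP v3,
+ * otherwise IP v2 is assumed as the minimum.
+ */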
+static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret;
+ u32 ver = 0;
+
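+ /* The IP version needs to be probed only once */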
+ if (host->hw_ver.major)
+ return;
+
+ /* Set the default (minimum) version in case the query below fails */
+ host->hw_ver.major = 2;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
+ if (!ret && ver >= UFS_UNIPRO_VER_1_8)
+ host->hw_ver.major = 3;
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_dev_params host_cap;
+ u32 adapt_val;
int ret;
host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
__func__);
}
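+ /*
+ * On hosts with IP v3 (UniPro 1.8), HS-G4 requires the INITIAL
+ * ADAPT sequence; ADAPT does not apply to lower gears.
+ */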
+ if (host->hw_ver.major >= 3) {
+ if (dev_req_params->gear_tx == UFS_HS_G4)
+ adapt_val = PA_INITIAL_ADAPT;
+ else
+ adapt_val = PA_NO_ADAPT;
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ adapt_val);
+ }
+
return ret;
}
int ret;
u32 tmp;
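+ /*
+ * Probe the controller IP version early: the power mode change
+ * path uses host->hw_ver to decide whether HS-G4 ADAPT applies.
+ */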
+ ufs_mtk_get_controller_version(hba);
+
ret = ufs_mtk_unipro_set_lpm(hba, false);
if (ret)
return ret;
*/
#define UFS_MTK_LIMIT_NUM_LANES_RX 2
#define UFS_MTK_LIMIT_NUM_LANES_TX 2
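+/* HS-G4 capable; INITIAL ADAPT is programmed during power mode change */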
-#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
+#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G4
+#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G4
#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
#define UFS_MTK_LIMIT_PWMGEAR_TX UFS_PWM_G4
#define UFS_MTK_LIMIT_RX_PWR_PWM SLOW_MODE
int vcore_volt;
};
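+/*
+ * Host controller IP version. Only 'major' is populated for now,
+ * derived from the local UniPro version (PA_LOCALVERINFO).
+ */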
+struct ufs_mtk_hw_ver {
+ u8 step;
+ u8 minor;
+ u8 major;
+};
+
struct ufs_mtk_host {
struct phy *mphy;
struct regulator *reg_va09;
struct reset_control *crypto_reset;
struct ufs_hba *hba;
struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
bool unipro_lpm;