From 6defecd943e92e4341cfa088ef4f4cc19ff2db8b Mon Sep 17 00:00:00 2001 From: Andrew Goodbody Date: Tue, 7 Oct 2025 12:42:12 +0100 Subject: ufs: ti-j721e: Correct error detection In ti_j721e_ufs_probe there is a call to clk_get_rate but the code after that attempts to detect an error from that call incorrectly uses IS_ERR_VALUE. Instead the test should just be for regular error codes. The call returns an unsigned long so that needs to be cast to a signed type first of all. This issue was found by Smatch. Signed-off-by: Andrew Goodbody Reviewed-by: Anshul Dalal Link: https://patch.msgid.link/20251007-ufs_ti-v2-1-501f575b6947@linaro.org Signed-off-by: Neil Armstrong --- drivers/ufs/ti-j721e-ufs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ufs/ti-j721e-ufs.c b/drivers/ufs/ti-j721e-ufs.c index c5c08610ffd..176d9b0e5c3 100644 --- a/drivers/ufs/ti-j721e-ufs.c +++ b/drivers/ufs/ti-j721e-ufs.c @@ -17,7 +17,7 @@ static int ti_j721e_ufs_probe(struct udevice *dev) { void __iomem *base; - unsigned int clock; + unsigned long clock; struct clk clk; u32 reg = 0; int ret; @@ -29,9 +29,9 @@ static int ti_j721e_ufs_probe(struct udevice *dev) } clock = clk_get_rate(&clk); - if (IS_ERR_VALUE(clock)) { + if ((long)clock <= 0) { dev_err(dev, "failed to get rate\n"); - return ret; + return clock ? clock : -EIO; } base = dev_remap_addr_index(dev, 0); -- cgit v1.2.3 From d0dfb8c635ab4ff60090878879c9f75f9a7193da Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Fri, 10 Oct 2025 11:45:57 +0900 Subject: ufs: Fix wrong bitfield usage for Data Direction in Transfer Request Commit d232d7fdbf6f ("ufs: core: sync ufshci.h with Linux v6.12") updated the Data Direction values from bitmask values to simple enumerations. 
Before: enum { UTP_NO_DATA_TRANSFER = 0x00000000, UTP_HOST_TO_DEVICE = 0x02000000, UTP_DEVICE_TO_HOST = 0x04000000, }; Updated: enum utp_data_direction { UTP_NO_DATA_TRANSFER = 0, UTP_HOST_TO_DEVICE = 1, UTP_DEVICE_TO_HOST = 2, }; However, the U-Boot code still uses these values directly without shifting, and resulting in wrong bitfield placement in the Transfer Request Descriptor. This fixes the issue by applying the necessary shift to align the value. Fixes: d232d7fdbf6f ("ufs: core: sync ufshci.h with Linux v6.12") Signed-off-by: Kunihiko Hayashi Reviewed-by: Neil Armstrong Link: https://patch.msgid.link/20251010024557.673787-1-hayashi.kunihiko@socionext.com Signed-off-by: Neil Armstrong --- drivers/ufs/ufs.c | 3 ++- drivers/ufs/ufs.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c index 57e6e8c013b..1d52ff64a4a 100644 --- a/drivers/ufs/ufs.c +++ b/drivers/ufs/ufs.c @@ -761,7 +761,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, *upiu_flags = UPIU_CMD_FLAGS_NONE; } - dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET); + dword_0 = (data_direction << UPIU_DATA_DIRECTION_OFFSET) + | (0x1 << UPIU_COMMAND_TYPE_OFFSET); /* Enable Interrupt for command */ dword_0 |= UTP_REQ_DESC_INT_CMD; diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h index 0337ac5996b..8dfa4eaa3be 100644 --- a/drivers/ufs/ufs.h +++ b/drivers/ufs/ufs.h @@ -77,6 +77,9 @@ enum { /* UTP Transfer Request Command Offset */ #define UPIU_COMMAND_TYPE_OFFSET 28 +/* UTP Transfer Request Data Direction Offset */ +#define UPIU_DATA_DIRECTION_OFFSET 25 + /* Offset of the response code in the UPIU header */ #define UPIU_RSP_CODE_OFFSET 8 -- cgit v1.2.3 From ae0872e9c38394be8fcdb38ef391e635d4cf7a22 Mon Sep 17 00:00:00 2001 From: Jared McArthur Date: Fri, 10 Oct 2025 14:55:55 -0500 Subject: ufs: Add support for sending UFS attribute requests Some UFS attributes must be set before a UFS device is initialized. 
Add ufshcd_query_attr and ufshcd_query_attr_retry to send UFS attribute requests. Taken from Linux Kernel v6.17 (drivers/ufs/core/ufshcd.c) and ported to U-Boot. Signed-off-by: Jared McArthur Reviewed-by: Bryan Brattlof Reviewed-by: Neil Armstrong Reviewed-by: Udit Kumar Reviewed-by: Neha Malcom Francis Link: https://patch.msgid.link/20251010195556.1772611-2-j-mcarthur@ti.com Signed-off-by: Neil Armstrong --- drivers/ufs/ufs.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c index 1d52ff64a4a..b64556a06d1 100644 --- a/drivers/ufs/ufs.c +++ b/drivers/ufs/ufs.c @@ -1123,6 +1123,101 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, return ret; } +/** + * ufshcd_query_attr - API function for sending attribute requests + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request completes + * + * Return: 0 for success, non-zero in case of failure. 
+ */ +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err; + + if (!attr_val) { + dev_err(hba->dev, + "%s: attribute value required for opcode 0x%x\n", + __func__, opcode); + return -EINVAL; + } + + ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + request->upiu_req.value = cpu_to_be32(*attr_val); + break; + case UPIU_QUERY_OPCODE_READ_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + break; + default: + dev_err(hba->dev, + "%s: Expected query attr opcode but got = 0x%.2x\n", + __func__, opcode); + err = -EINVAL; + goto out; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, + "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); + goto out; + } + + *attr_val = be32_to_cpu(response->upiu_res.value); + +out: + return err; +} + +/** + * ufshcd_query_attr_retry() - API function for sending query + * attribute with retries + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request + * completes + * + * Return: 0 for success, non-zero in case of failure. 
+ */ +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val) +{ + int ret = 0; + u32 retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query attribute, idn %d, failed with error %d after %d retries\n", + __func__, idn, ret, QUERY_REQ_RETRIES); + return ret; +} + static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, -- cgit v1.2.3 From c7327ac29590952089bf3d8994d0951e58cc2322 Mon Sep 17 00:00:00 2001 From: Jared McArthur Date: Fri, 10 Oct 2025 14:55:56 -0500 Subject: ufs: Add bRefClkFreq attribute setting A UFS device needs its bRefClkFreq attribute set to the correct value before switching to high speed. If bRefClkFreq is set to the wrong value, all transactions after the power mode change will fail. The bRefClkFreq depends on the host controller and the device. Query the device's current bRefClkFreq and compare with the ref_clk specified in the device-tree. If the two differ, set the bRefClkFreq to the device-tree's ref_clk frequency. Taken from Linux kernel v6.17 (drivers/ufs/core/ufshcd.c and include/ufs/ufs.h) and ported to U-Boot. 
Signed-off-by: Jared McArthur Reviewed-by: Bryan Brattlof Reviewed-by: Udit Kumar Reviewed-by: Neha Malcom Francis Link: https://patch.msgid.link/20251010195556.1772611-3-j-mcarthur@ti.com Signed-off-by: Neil Armstrong --- drivers/ufs/ufs.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ drivers/ufs/ufs.h | 9 ++++++ 2 files changed, 94 insertions(+) diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c index b64556a06d1..2125e70a054 100644 --- a/drivers/ufs/ufs.c +++ b/drivers/ufs/ufs.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -1789,6 +1790,88 @@ out: return err; } +struct ufs_ref_clk { + unsigned long freq_hz; + enum ufs_ref_clk_freq val; +}; + +static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { + {19200000, REF_CLK_FREQ_19_2_MHZ}, + {26000000, REF_CLK_FREQ_26_MHZ}, + {38400000, REF_CLK_FREQ_38_4_MHZ}, + {52000000, REF_CLK_FREQ_52_MHZ}, + {0, REF_CLK_FREQ_INVAL}, +}; + +static enum ufs_ref_clk_freq +ufs_get_bref_clk_from_hz(unsigned long freq) +{ + int i; + + for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) + if (ufs_ref_clk_freqs[i].freq_hz == freq) + return ufs_ref_clk_freqs[i].val; + + return REF_CLK_FREQ_INVAL; +} + +enum ufs_ref_clk_freq ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) +{ + unsigned long freq; + + freq = clk_get_rate(refclk); + return ufs_get_bref_clk_from_hz(freq); +} + +static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) +{ + int err; + struct clk *ref_clk; + u32 host_ref_clk_freq; + u32 dev_ref_clk_freq; + + /* get ref_clk */ + ref_clk = devm_clk_get(hba->dev, "ref_clk"); + if (IS_ERR((const void *)ref_clk)) { + err = PTR_ERR(ref_clk); + goto out; + } + + host_ref_clk_freq = ufshcd_parse_dev_ref_clk_freq(hba, ref_clk); + if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) + dev_err(hba->dev, + "invalid ref_clk setting = %ld\n", clk_get_rate(ref_clk)); + + if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) + goto out; + + err = ufshcd_query_attr_retry(hba, 
UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &dev_ref_clk_freq); + + if (err) { + dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", err); + goto out; + } + + if (dev_ref_clk_freq == host_ref_clk_freq) + goto out; /* nothing to update */ + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &host_ref_clk_freq); + + if (err) { + dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", + ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); + goto out; + } + + dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", + ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); + +out: + return err; +} + /** * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device */ @@ -2016,6 +2099,8 @@ static int ufs_start(struct ufs_hba *hba) return ret; } + ufshcd_set_dev_ref_clk(hba); + if (ufshcd_get_max_pwr_mode(hba)) { dev_err(hba->dev, "%s: Failed getting max supported power mode\n", diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h index 8dfa4eaa3be..d38b6fec2ca 100644 --- a/drivers/ufs/ufs.h +++ b/drivers/ufs/ufs.h @@ -175,6 +175,15 @@ enum query_opcode { UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, }; +/* bRefClkFreq attribute values */ +enum ufs_ref_clk_freq { + REF_CLK_FREQ_19_2_MHZ = 0, + REF_CLK_FREQ_26_MHZ = 1, + REF_CLK_FREQ_38_4_MHZ = 2, + REF_CLK_FREQ_52_MHZ = 3, + REF_CLK_FREQ_INVAL = -1, +}; + /* Query response result code */ enum { QUERY_RESULT_SUCCESS = 0x00, -- cgit v1.2.3 From 84919722e6a7af63729e2289588f4dfb37b43879 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Mon, 20 Oct 2025 16:16:21 +0800 Subject: ufs: core: Add ufshcd_dme_enable() and ufshcd_dme_reset() In order for host drivers to use. 
Reviewed-by: Neil Armstrong Signed-off-by: Shawn Lin Link: https://patch.msgid.link/1760948182-128561-1-git-send-email-shawn.lin@rock-chips.com Signed-off-by: Neil Armstrong --- drivers/ufs/ufs.c | 30 ++++++++++++++++++++++++++++++ drivers/ufs/ufs.h | 2 ++ 2 files changed, 32 insertions(+) diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c index 2125e70a054..b4f994c5bc0 100644 --- a/drivers/ufs/ufs.c +++ b/drivers/ufs/ufs.c @@ -369,6 +369,36 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) return ret; } +int ufshcd_dme_enable(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_ENABLE; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-enable: error code %d\n", ret); + + return ret; +} + +int ufshcd_dme_reset(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_RESET; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-reset: error code %d\n", ret); + + return ret; +} + /** * ufshcd_disable_intr_aggr - Disables interrupt aggregation. * diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h index d38b6fec2ca..bc839a43704 100644 --- a/drivers/ufs/ufs.h +++ b/drivers/ufs/ufs.h @@ -447,6 +447,8 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, u32 mib_val, u8 peer); int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val, u8 peer); +int ufshcd_dme_enable(struct ufs_hba *hba); +int ufshcd_dme_reset(struct ufs_hba *hba); static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, u32 mib_val) -- cgit v1.2.3 From 76465ce21ee4ffe0491125a52a5db19171baad34 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Mon, 20 Oct 2025 16:16:22 +0800 Subject: ufs: rockchip: Add initial support This patch adds initial support for UFS controller on Rockchip platforms. 
Reviewed-by: Neil Armstrong Signed-off-by: Shawn Lin Link: https://patch.msgid.link/1760948182-128561-2-git-send-email-shawn.lin@rock-chips.com Signed-off-by: Neil Armstrong --- MAINTAINERS | 1 + drivers/ufs/Kconfig | 9 ++ drivers/ufs/Makefile | 1 + drivers/ufs/ufs-rockchip.c | 211 +++++++++++++++++++++++++++++++++++++++++++++ drivers/ufs/ufs-rockchip.h | 88 +++++++++++++++++++ 5 files changed, 310 insertions(+) create mode 100644 drivers/ufs/ufs-rockchip.c create mode 100644 drivers/ufs/ufs-rockchip.h diff --git a/MAINTAINERS b/MAINTAINERS index 671903605d1..ee4ab3711b3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -575,6 +575,7 @@ F: drivers/mmc/rockchip_dw_mmc.c F: drivers/pinctrl/rockchip/ F: drivers/ram/rockchip/ F: drivers/sysreset/sysreset_rockchip.c +F: drivers/ufs/*rockchip* F: drivers/video/rockchip/ F: tools/rkcommon.c F: tools/rkcommon.h diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index b08ca08b07c..3d367855173 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -33,6 +33,15 @@ config QCOM_UFS This selects the platform driver for the UFS host controller present on Qualcomm Snapdragon SoCs. +config ROCKCHIP_UFS + bool "Rockchip specific hooks to UFS controller platform driver" + depends on UFS + help + This selects the Rockchip specific additions to UFSHCD platform driver. + + Select this if you have UFS controller on Rockchip chipset. + If unsure, say N. 
+ config TI_J721E_UFS bool "Glue Layer driver for UFS on TI J721E devices" help diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index 2a378e45111..41e910cfc51 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o obj-$(CONFIG_UFS_PCI) += ufs-pci.o obj-$(CONFIG_UFS_RENESAS) += ufs-renesas.o +obj-$(CONFIG_ROCKCHIP_UFS) += ufs-rockchip.o obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o diff --git a/drivers/ufs/ufs-rockchip.c b/drivers/ufs/ufs-rockchip.c new file mode 100644 index 00000000000..aa924e5cedc --- /dev/null +++ b/drivers/ufs/ufs-rockchip.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip UFS Host Controller driver + * + * Copyright (C) 2025 Rockchip Electronics Co.Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ufs.h" +#include "unipro.h" +#include "ufs-rockchip.h" + +static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + if (status != POST_CHANGE) + return 0; + + ufshcd_dme_reset(hba); + ufshcd_dme_enable(hba); + + if (hba->ops->phy_initialization) { + err = hba->ops->phy_initialization(hba); + if (err) + dev_err(hba->dev, + "Phy init failed (%d)\n", err); + } + + return err; +} + +static void ufs_rockchip_rk3576_phy_parameter_init(struct ufs_hba *hba) +{ + struct ufs_rockchip_host *host = dev_get_priv(hba->dev); + + ufs_sys_writel(host->mphy_base, 0x80, CMN_REG23); + ufs_sys_writel(host->mphy_base, 0xB5, TRSV0_REG14); + ufs_sys_writel(host->mphy_base, 0xB5, TRSV1_REG14); + ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG15); + ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG15); + ufs_sys_writel(host->mphy_base, 0x38, TRSV0_REG08); + ufs_sys_writel(host->mphy_base, 0x38, TRSV1_REG08); + ufs_sys_writel(host->mphy_base, 0x50, TRSV0_REG29); + ufs_sys_writel(host->mphy_base, 
0x50, TRSV1_REG29); + ufs_sys_writel(host->mphy_base, 0x80, TRSV0_REG2E); + ufs_sys_writel(host->mphy_base, 0x80, TRSV1_REG2E); + ufs_sys_writel(host->mphy_base, 0x18, TRSV0_REG3C); + ufs_sys_writel(host->mphy_base, 0x18, TRSV1_REG3C); + ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG16); + ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG16); + ufs_sys_writel(host->mphy_base, 0x20, TRSV0_REG17); + ufs_sys_writel(host->mphy_base, 0x20, TRSV1_REG17); + ufs_sys_writel(host->mphy_base, 0xC0, TRSV0_REG18); + ufs_sys_writel(host->mphy_base, 0xC0, TRSV1_REG18); + ufs_sys_writel(host->mphy_base, 0x03, CMN_REG25); + ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG3D); + ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG3D); + ufs_sys_writel(host->mphy_base, 0xC0, CMN_REG23); + udelay(1); + ufs_sys_writel(host->mphy_base, 0x00, CMN_REG23); + udelay(200); +} + +static int ufs_rockchip_rk3576_phy_init(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(PA_LOCAL_TX_LCC_ENABLE, 0x0), 0x0); + /* enable the mphy DME_SET cfg */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE); + for (int i = 0; i < 2; i++) { + /* Configuration M-TX */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, SEL_TX_LANE0 + i), 0x02); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_VALUE, SEL_TX_LANE0 + i), 0x44); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, SEL_TX_LANE0 + i), 0xe6); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, SEL_TX_LANE0 + i), 0x07); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_TASE_VALUE, SEL_TX_LANE0 + i), 0x93); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_BASE_NVALUE, SEL_TX_LANE0 + i), 0xc9); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_POWER_SAVING_CTRL, SEL_TX_LANE0 + i), 0x00); + /* Configuration M-RX */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06); + ufshcd_dme_set(hba, 
UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, SEL_RX_LANE0 + i), 0x00); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE, SEL_RX_LANE0 + i), 0x58); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE1, SEL_RX_LANE0 + i), 0x8c); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE2, SEL_RX_LANE0 + i), 0x02); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_OPTION, SEL_RX_LANE0 + i), 0xf6); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_POWER_SAVING_CTRL, SEL_RX_LANE0 + i), 0x69); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_SAVE_DET_CTRL, SEL_RX_LANE0 + i), 0x18); + } + + /* disable the mphy DME_SET cfg */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE); + + ufs_rockchip_rk3576_phy_parameter_init(hba); + + /* start link up */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1); + + return 0; +} + +static int ufs_rockchip_common_init(struct ufs_hba *hba) +{ + struct udevice *dev = hba->dev; + struct ufs_rockchip_host *host = dev_get_priv(dev); + struct resource res; + int err; + + /* system control register for hci */ + err = dev_read_resource_byname(dev, "hci_grf", &res); + if (err) { + dev_err(dev, "cannot ioremap for hci system control register\n"); + return err; + } + host->ufs_sys_ctrl = (void *)(res.start); + + /* system control register for mphy */ + err = dev_read_resource_byname(dev, "mphy_grf", &res); + if (err) { + dev_err(dev, "cannot ioremap for mphy system control register\n"); + return err; + } + host->ufs_phy_ctrl = (void *)(res.start); + + /* mphy base register */ + err = dev_read_resource_byname(dev, "mphy", &res); + if (err) { + dev_err(dev, "cannot 
ioremap for mphy base register\n"); + return err; + } + host->mphy_base = (void *)(res.start); + + host->phy_config_mode = dev_read_u32_default(dev, "ufs-phy-config-mode", 0); + + err = reset_get_bulk(dev, &host->rsts); + if (err) { + dev_err(dev, "Can't get reset: %d\n", err); + return err; + } + + host->hba = hba; + + return 0; +} + +static int ufs_rockchip_rk3576_init(struct ufs_hba *hba) +{ + int ret = 0; + + ret = ufs_rockchip_common_init(hba); + if (ret) { + dev_err(hba->dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + return 0; +} + +static struct ufs_hba_ops ufs_hba_rk3576_vops = { + .init = ufs_rockchip_rk3576_init, + .phy_initialization = ufs_rockchip_rk3576_phy_init, + .hce_enable_notify = ufs_rockchip_hce_enable_notify, +}; + +static const struct udevice_id ufs_rockchip_of_match[] = { + { .compatible = "rockchip,rk3576-ufshc", .data = (ulong)&ufs_hba_rk3576_vops}, + {}, +}; + +static int ufs_rockchip_probe(struct udevice *dev) +{ + struct ufs_hba_ops *ops = (struct ufs_hba_ops *)dev_get_driver_data(dev); + int err; + + err = ufshcd_probe(dev, ops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +static int ufs_rockchip_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +U_BOOT_DRIVER(rockchip_ufs) = { + .name = "ufshcd-rockchip", + .id = UCLASS_UFS, + .of_match = ufs_rockchip_of_match, + .probe = ufs_rockchip_probe, + .bind = ufs_rockchip_bind, + .priv_auto = sizeof(struct ufs_rockchip_host), +}; diff --git a/drivers/ufs/ufs-rockchip.h b/drivers/ufs/ufs-rockchip.h new file mode 100644 index 00000000000..3dcb80f5702 --- /dev/null +++ b/drivers/ufs/ufs-rockchip.h @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Rockchip UFS Host Controller driver + * + * Copyright (C) 2025 Rockchip Electronics Co.Ltd. 
+ */ + +#ifndef _UFS_ROCKCHIP_H_ +#define _UFS_ROCKCHIP_H_ + +#define UFS_MAX_CLKS 3 + +#define SEL_TX_LANE0 0x0 +#define SEL_TX_LANE1 0x1 +#define SEL_TX_LANE2 0x2 +#define SEL_TX_LANE3 0x3 +#define SEL_RX_LANE0 0x4 +#define SEL_RX_LANE1 0x5 +#define SEL_RX_LANE2 0x6 +#define SEL_RX_LANE3 0x7 + +#define VND_TX_CLK_PRD 0xAA +#define VND_TX_CLK_PRD_EN 0xA9 +#define VND_TX_LINERESET_PVALUE2 0xAB +#define VND_TX_LINERESET_PVALUE1 0xAC +#define VND_TX_LINERESET_VALUE 0xAD +#define VND_TX_BASE_NVALUE 0x93 +#define VND_TX_TASE_VALUE 0x94 +#define VND_TX_POWER_SAVING_CTRL 0x7F +#define VND_RX_CLK_PRD 0x12 +#define VND_RX_CLK_PRD_EN 0x11 +#define VND_RX_LINERESET_PVALUE2 0x1B +#define VND_RX_LINERESET_PVALUE1 0x1C +#define VND_RX_LINERESET_VALUE 0x1D +#define VND_RX_LINERESET_OPTION 0x25 +#define VND_RX_POWER_SAVING_CTRL 0x2F +#define VND_RX_SAVE_DET_CTRL 0x1E + +#define CMN_REG23 0x8C +#define CMN_REG25 0x94 +#define TRSV0_REG08 0xE0 +#define TRSV1_REG08 0x220 +#define TRSV0_REG14 0x110 +#define TRSV1_REG14 0x250 +#define TRSV0_REG15 0x134 +#define TRSV1_REG15 0x274 +#define TRSV0_REG16 0x128 +#define TRSV1_REG16 0x268 +#define TRSV0_REG17 0x12C +#define TRSV1_REG17 0x26c +#define TRSV0_REG18 0x120 +#define TRSV1_REG18 0x260 +#define TRSV0_REG29 0x164 +#define TRSV1_REG29 0x2A4 +#define TRSV0_REG2E 0x178 +#define TRSV1_REG2E 0x2B8 +#define TRSV0_REG3C 0x1B0 +#define TRSV1_REG3C 0x2F0 +#define TRSV0_REG3D 0x1B4 +#define TRSV1_REG3D 0x2F4 + +#define MPHY_CFG 0x200 +#define MPHY_CFG_ENABLE 0x40 +#define MPHY_CFG_DISABLE 0x0 + +#define MIB_T_DBG_CPORT_TX_ENDIAN 0xc022 +#define MIB_T_DBG_CPORT_RX_ENDIAN 0xc023 + +struct ufs_rockchip_host { + struct ufs_hba *hba; + void __iomem *ufs_phy_ctrl; + void __iomem *ufs_sys_ctrl; + void __iomem *mphy_base; + struct reset_ctl_bulk rsts; + struct clk ref_out_clk; + uint64_t caps; + uint32_t phy_config_mode; +}; + +#define ufs_sys_writel(base, val, reg) \ + writel((val), (base) + (reg)) +#define ufs_sys_readl(base, reg) readl((base) + 
(reg)) +#define ufs_sys_set_bits(base, mask, reg) \ + ufs_sys_writel((base), ((mask) | (ufs_sys_readl((base), (reg)))), (reg)) +#define ufs_sys_ctrl_clr_bits(base, mask, reg) \ + ufs_sys_writel((base), ((~(mask)) & (ufs_sys_readl((base), (reg)))), (reg)) + +#endif /* _UFS_ROCKCHIP_H_ */ -- cgit v1.2.3 From 200e3f893fbc2915797b466043bbcb19000d821a Mon Sep 17 00:00:00 2001 From: Igor Belwon Date: Sat, 11 Oct 2025 21:10:05 +0200 Subject: ufs: unipro: Add PA_SCRAMBLING property This property is required for proper I/O access on the MediaTek MT6878 UFS controller, and is part of UniPro specifications. Signed-off-by: Igor Belwon Link: https://patch.msgid.link/20251011-mtk-ufs-uboot-v1-2-a05f991ee150@mentallysanemainliners.org Signed-off-by: Neil Armstrong --- drivers/ufs/unipro.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ufs/unipro.h b/drivers/ufs/unipro.h index 56833602b77..bde1490397c 100644 --- a/drivers/ufs/unipro.h +++ b/drivers/ufs/unipro.h @@ -125,6 +125,7 @@ #define PA_RXPWRSTATUS 0x1582 #define PA_RXGEAR 0x1583 #define PA_RXTERMINATION 0x1584 +#define PA_SCRAMBLING 0x1585 #define PA_MAXRXPWMGEAR 0x1586 #define PA_MAXRXHSGEAR 0x1587 #define PA_PACPREQTIMEOUT 0x1590 -- cgit v1.2.3 From 6cca3db2bd91ce1e5d812a61c13f1f62a2c2b82d Mon Sep 17 00:00:00 2001 From: Igor Belwon Date: Sat, 11 Oct 2025 21:10:06 +0200 Subject: ufs: Add MediaTek UFS driver Add the UFS driver for MediaTek platforms. 
Loosely based on the Linux driver, this UFS driver can successfully get a link and R/W access to the UFS chip on the MediaTek MT6878 mobile SoC, when U-Boot is running as lk, or as the kernel (Secure world access is not tested) Signed-off-by: Igor Belwon Link: https://patch.msgid.link/20251011-mtk-ufs-uboot-v1-3-a05f991ee150@mentallysanemainliners.org Signed-off-by: Neil Armstrong --- drivers/ufs/Kconfig | 14 ++ drivers/ufs/Makefile | 1 + drivers/ufs/ufs-mediatek-sip.h | 56 ++++++ drivers/ufs/ufs-mediatek.c | 403 +++++++++++++++++++++++++++++++++++++++++ drivers/ufs/ufs-mediatek.h | 210 +++++++++++++++++++++ 5 files changed, 684 insertions(+) create mode 100644 drivers/ufs/ufs-mediatek-sip.h create mode 100644 drivers/ufs/ufs-mediatek.c create mode 100644 drivers/ufs/ufs-mediatek.h diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index 3d367855173..b030a972b98 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -15,6 +15,20 @@ config CADENCE_UFS This selects the platform driver for the Cadence UFS host controller present on present TI's J721e devices. +config UFS_MEDIATEK + tristate "MediaTek UFS Host Controller Driver" + depends on UFS && ARCH_MEDIATEK + select PHY_MTK_UFS + help + This selects the MediaTek specific additions to UFSHCD platform driver. + UFS host on Mediatek needs some vendor specific configuration before + accessing the hardware which includes PHY configuration and vendor + specific registers. + + Select this if you have UFS controller on MediaTek chipset. + + If unsure, say N. 
+ config UFS_PCI bool "PCI bus based UFS Controller support" depends on PCI && UFS diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index 41e910cfc51..bbee66caa8a 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_UFS) += ufs.o ufs-uclass.o obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o +obj-$(CONFIG_UFS_MEDIATEK) += ufs-mediatek.o obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o obj-$(CONFIG_UFS_PCI) += ufs-pci.o diff --git a/drivers/ufs/ufs-mediatek-sip.h b/drivers/ufs/ufs-mediatek-sip.h new file mode 100644 index 00000000000..6a61f7bac69 --- /dev/null +++ b/drivers/ufs/ufs-mediatek-sip.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 MediaTek Inc. + * Copyright (c) 2025, Igor Belwon + * + * Slimmed down header from Linux: drivers/ufs/host/ufs-mediatek-sip.h + */ + +#ifndef _UFS_MEDIATEK_SIP_H +#define _UFS_MEDIATEK_SIP_H + +#include + +/* + * SiP (Slicon Partner) commands + */ +#define MTK_SIP_UFS_CONTROL ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, 0x276) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) + +/* + * SMC call wrapper function + */ +struct ufs_mtk_smc_arg { + unsigned long cmd; + struct arm_smccc_res *res; + unsigned long v1; + unsigned long v2; + unsigned long v3; + unsigned long v4; + unsigned long v5; + unsigned long v6; + unsigned long v7; +}; + +static inline void _ufs_mtk_smc(struct ufs_mtk_smc_arg s) +{ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, + s.cmd, + s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res); +} + +#define ufs_mtk_smc(...) 
\ + _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__}) + +/* SIP interface */ + +#define ufs_mtk_ref_clk_notify(on, stage, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high) + +#endif /* !_UFS_MEDIATEK_SIP_H */ diff --git a/drivers/ufs/ufs-mediatek.c b/drivers/ufs/ufs-mediatek.c new file mode 100644 index 00000000000..10e7990d6bb --- /dev/null +++ b/drivers/ufs/ufs-mediatek.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Igor Belwon + * + * Loosely based on Linux driver: drivers/ufs/host/ufs-mediatek.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ufs.h" +#include "ufs-mediatek.h" +#include "ufs-mediatek-sip.h" + +static void ufs_mtk_advertise_quirks(struct ufs_hba *hba) +{ + hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | + UFSHCD_QUIRK_MCQ_BROKEN_INTR | + UFSHCD_QUIRK_BROKEN_LSDBS_CAP; +} + +static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + + if (status == PRE_CHANGE) { + if (host->caps & UFS_MTK_CAP_DISABLE_AH8) { + ufshcd_writel(hba, 0, + REG_AUTO_HIBERNATE_IDLE_TIMER); + hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; + } + + /* + * Turn on CLK_CG early to bypass abnormal ERR_CHK signal + * to prevent host hang issue + */ + ufshcd_writel(hba, + ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, + REG_UFS_XOUFS_CTRL); + + /* DDR_EN setting */ + if (host->ip_ver >= IP_VER_MT6989) { + ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8), + 0x453000, REG_UFS_MMIO_OPT_CTRL_0); + } + } + + return 0; +} + +static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm) +{ + int ret; + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + + ret = ufshcd_dme_set(hba, + 
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + lpm ? 1 : 0); + if (!ret || !lpm) { + /* + * Forcibly set as non-LPM mode if UIC commands is failed + * to use default hba_enable_delay_us value for re-enabling + * the host. + */ + host->unipro_lpm = lpm; + } + + return ret; +} + +static int ufs_mtk_pre_link(struct ufs_hba *hba) +{ + int ret; + u32 tmp; + + ret = ufs_mtk_unipro_set_lpm(hba, false); + if (ret) + return ret; + + /* + * Setting PA_Local_TX_LCC_Enable to 0 before link startup + * to make sure that both host and device TX LCC are disabled + * once link startup is completed. + */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + if (ret) + return ret; + + /* disable deep stall */ + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + if (ret) + return ret; + + tmp &= ~(1 << 6); + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + if (ret) + return ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SCRAMBLING), tmp); + + return ret; +} + +static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) +{ + u32 tmp; + + if (enable) { + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + tmp = tmp | + (1 << RX_SYMBOL_CLK_GATE_EN) | + (1 << SYS_CLK_GATE_EN) | + (1 << TX_CLK_GATE_EN); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp); + tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp); + } else { + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) | + (1 << SYS_CLK_GATE_EN) | + (1 << TX_CLK_GATE_EN)); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp); + tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp); + } +} + +static void ufs_mtk_post_link(struct ufs_hba 
*hba) +{ + /* enable unipro clock gating feature */ + ufs_mtk_cfg_unipro_cg(hba, true); +} + +static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = ufs_mtk_pre_link(hba); + break; + case POST_CHANGE: + ufs_mtk_post_link(hba); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ufs_mtk_bind_mphy(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + int err = 0; + + err = generic_phy_get_by_index(hba->dev, 0, host->mphy); + + if (IS_ERR(host->mphy)) { + err = PTR_ERR(host->mphy); + if (err != -ENODEV) { + dev_info(hba->dev, "%s: Could NOT get a valid PHY %d\n", __func__, + err); + } + } + + if (err) + host->mphy = NULL; + + return err; +} + +static void ufs_mtk_init_reset_control(struct ufs_hba *hba, + struct reset_ctl **rc, + char *str) +{ + *rc = devm_reset_control_get(hba->dev, str); + if (IS_ERR(*rc)) { + dev_info(hba->dev, "Failed to get reset control %s: %ld\n", + str, PTR_ERR(*rc)); + *rc = NULL; + } +} + +static void ufs_mtk_init_reset(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + + ufs_mtk_init_reset_control(hba, &host->hci_reset, + "hci_rst"); + ufs_mtk_init_reset_control(hba, &host->unipro_reset, + "unipro_rst"); + ufs_mtk_init_reset_control(hba, &host->crypto_reset, + "crypto_rst"); +} + +static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + u32 hw_ip_ver; + + hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) || + ((hw_ip_ver & (0xFF << 24)) == 0)) { + hw_ip_ver &= ~(0xFF << 24); + hw_ip_ver |= (0x1 << 28); + } + + host->ip_ver = hw_ip_ver; + + dev_info(hba->dev, "MediaTek UFS IP Version: 0x%x\n", hw_ip_ver); +} + +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = dev_get_priv(hba->dev); + 
struct arm_smccc_res res; + int timeout, time_checked = 0; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res); + + if (on) { + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + udelay(10); + ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = REFCLK_REQ_TIMEOUT_US; + do { + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until ack bit equals to req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + udelay(200); + time_checked += 200; + } while (time_checked != timeout); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + /* + * If clock on timeout, assume clock is off, notify tfa do clock + * off setting.(keep DIFN disable, release resource) + * If clock off timeout, assume clock will off finally, + * set ref_clk_enabled directly.(keep DIFN disable, keep resource) + */ + if (on) + ufs_mtk_ref_clk_notify(false, POST_CHANGE, res); + else + host->ref_clk_enabled = false; + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (on) + udelay(10); + + ufs_mtk_ref_clk_notify(on, POST_CHANGE, res); + + return 0; +} + +/** + * ufs_mtk_init - bind phy with controller + * @hba: host controller instance + * + * Powers up PHY enabling clocks and regulators. + * + * Returns -ENODEV if binding fails, returns negative error + * on phy power up failure and returns zero on success. 
+ */ +static int ufs_mtk_init(struct ufs_hba *hba) +{ + struct ufs_mtk_host *priv = dev_get_priv(hba->dev); + int err; + + priv->hba = hba; + + err = ufs_mtk_bind_mphy(hba); + if (err) + return -ENODEV; + + ufs_mtk_advertise_quirks(hba); + + ufs_mtk_init_reset(hba); + + // TODO: Clocking + + err = generic_phy_power_on(priv->mphy); + if (err) { + dev_err(hba->dev, "%s: phy init failed, err = %d\n", + __func__, err); + return err; + } + + ufs_mtk_setup_ref_clk(hba, true); + ufs_mtk_get_hw_ip_version(hba); + + return 0; +} + +static int ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * The reset signal is active low. UFS devices shall detect + * more than or equal to 1us of positive or negative RST_n + * pulse width. + * + * To be on safe side, keep the reset low for at least 10us. + */ + udelay(13); + + ufs_mtk_device_reset_ctrl(1, res); + + /* Some devices may need time to respond to rst_n */ + mdelay(13); + + dev_dbg(hba->dev, "device reset done\n"); + + return 0; +} + +static struct ufs_hba_ops ufs_mtk_hba_ops = { + .init = ufs_mtk_init, + .hce_enable_notify = ufs_mtk_hce_enable_notify, + .link_startup_notify = ufs_mtk_link_startup_notify, + .device_reset = ufs_mtk_device_reset, +}; + +static int ufs_mtk_probe(struct udevice *dev) +{ + int ret; + + ret = ufshcd_probe(dev, &ufs_mtk_hba_ops); + if (ret) { + dev_err(dev, "ufshcd_probe() failed, ret:%d\n", ret); + return ret; + } + + return 0; +} + +static int ufs_mtk_bind(struct udevice *dev) +{ + struct udevice *scsi_dev; + + return ufs_scsi_bind(dev, &scsi_dev); +} + +static const struct udevice_id ufs_mtk_ids[] = { + { .compatible = "mediatek,mt6878-ufshci" }, + {}, +}; + +U_BOOT_DRIVER(mediatek_ufshci) = { + .name = "mediatek-ufshci", + .id = UCLASS_UFS, + .of_match = ufs_mtk_ids, + .probe = ufs_mtk_probe, + .bind = ufs_mtk_bind, + .priv_auto = sizeof(struct ufs_mtk_host), +}; diff --git a/drivers/ufs/ufs-mediatek.h 
b/drivers/ufs/ufs-mediatek.h new file mode 100644 index 00000000000..11a83d34c5b --- /dev/null +++ b/drivers/ufs/ufs-mediatek.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. + * Copyright (c) 2025, Igor Belwon + * + * Slimmed down header from Linux: drivers/ufs/host/ufs-mediatek.h + */ + +#ifndef _UFS_MEDIATEK_H +#define _UFS_MEDIATEK_H + +#include +#include + +/* + * MCQ define and struct + */ +#define UFSHCD_MAX_Q_NR 8 +#define MTK_MCQ_INVALID_IRQ 0xFFFF + +/* REG_UFS_MMIO_OPT_CTRL_0 160h */ +#define EHS_EN BIT(0) +#define PFM_IMPV BIT(1) +#define MCQ_MULTI_INTR_EN BIT(2) +#define MCQ_CMB_INTR_EN BIT(3) +#define MCQ_AH8 BIT(4) + +#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN) + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_XOUFS_CTRL 0x140 +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_MMIO_OPT_CTRL_0 0x160 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_MTK_IP_VER 0x2240 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 +#define REG_UFS_DEBUG_SEL_B0 0x22D0 +#define REG_UFS_DEBUG_SEL_B1 0x22D4 +#define REG_UFS_DEBUG_SEL_B2 0x22D8 +#define REG_UFS_DEBUG_SEL_B3 0x22DC + +#define REG_UFS_MTK_SQD 0x2800 +#define REG_UFS_MTK_SQIS 0x2814 +#define REG_UFS_MTK_CQD 0x281C +#define REG_UFS_MTK_CQIS 0x2824 + +#define REG_UFS_MCQ_STRIDE 0x30 + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_US 3000 +#define REFCLK_DEFAULT_WAIT_US 32 + +/* + * Other attributes + */ +#define VS_DEBUGCLOCKENABLE 0xD0A1 +#define VS_SAVEPOWERCONTROL 0xD0A6 +#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 + +/* + * Vendor specific link state + */ +enum { + VS_LINK_DISABLED = 0, + VS_LINK_DOWN = 1, + VS_LINK_UP = 2, + VS_LINK_HIBERN8 = 3, + VS_LINK_LOST = 4, + VS_LINK_CFG = 5, +}; + +/* + * 
Vendor specific host controller state + */ +enum { + VS_HCE_RESET = 0, + VS_HCE_BASE = 1, + VS_HCE_OOCPR_WAIT = 2, + VS_HCE_DME_RESET = 3, + VS_HCE_MIDDLE = 4, + VS_HCE_DME_ENABLE = 5, + VS_HCE_DEFAULTS = 6, + VS_HIB_IDLEEN = 7, + VS_HIB_ENTER = 8, + VS_HIB_ENTER_CONF = 9, + VS_HIB_MIDDLE = 10, + VS_HIB_WAITTIMER = 11, + VS_HIB_EXIT_CONF = 12, + VS_HIB_EXIT = 13, +}; + +/* + * VS_DEBUGCLOCKENABLE + */ +enum { + TX_SYMBOL_CLK_REQ_FORCE = 5, +}; + +/* + * VS_SAVEPOWERCONTROL + */ +enum { + RX_SYMBOL_CLK_GATE_EN = 0, + SYS_CLK_GATE_EN = 2, + TX_CLK_GATE_EN = 3, +}; + +/* + * Host capability + */ +enum ufs_mtk_host_caps { + UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, + UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, + UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, + UFS_MTK_CAP_BROKEN_VCC = 1 << 3, + + /* + * Override UFS_MTK_CAP_BROKEN_VCC's behavior to + * allow vccqx upstream to enter LPM + */ + UFS_MTK_CAP_ALLOW_VCCQX_LPM = 1 << 5, + UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6, + UFS_MTK_CAP_TX_SKEW_FIX = 1 << 7, + UFS_MTK_CAP_DISABLE_MCQ = 1 << 8, + /* Control MTCMOS with RTFF */ + UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9, + + UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10, +}; + +struct ufs_mtk_hw_ver { + u8 step; + u8 minor; + u8 major; +}; + +struct ufs_mtk_mcq_intr_info { + struct ufs_hba *hba; + u32 irq; + u8 qid; +}; + +struct ufs_mtk_host { + struct phy *mphy; + struct reset_ctl *unipro_reset; + struct reset_ctl *crypto_reset; + struct reset_ctl *hci_reset; + struct ufs_hba *hba; + struct ufs_mtk_crypt_cfg *crypt; + struct clk_bulk clks; + struct ufs_mtk_hw_ver hw_ver; + enum ufs_mtk_host_caps caps; + bool mphy_powered_on; + bool unipro_lpm; + bool ref_clk_enabled; + bool is_clks_enabled; + u16 ref_clk_ungating_wait_us; + u16 ref_clk_gating_wait_us; + u32 ip_ver; + bool legacy_ip_ver; + + bool mcq_set_intr; + bool is_mcq_intr_enabled; + int mcq_nr_intr; + struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR]; +}; + +/* MTK delay of autosuspend: 500 ms */ +#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500 + +/* 
MTK RTT support number */ +#define MTK_MAX_NUM_RTT 2 + +/* UFSHCI MTK ip version value */ +enum { + /* UFSHCI 3.1 */ + IP_VER_MT6983 = 0x10360000, + IP_VER_MT6878 = 0x10420200, + + /* UFSHCI 4.0 */ + IP_VER_MT6897 = 0x10440000, + IP_VER_MT6989 = 0x10450000, + IP_VER_MT6899 = 0x10450100, + IP_VER_MT6991_A0 = 0x10460000, + IP_VER_MT6991_B0 = 0x10470000, + IP_VER_MT6993 = 0x10480000, + + IP_VER_NONE = 0xFFFFFFFF +}; + +enum ip_ver_legacy { + IP_LEGACY_VER_MT6781 = 0x10380000, + IP_LEGACY_VER_MT6879 = 0x10360000, + IP_LEGACY_VER_MT6893 = 0x20160706 +}; + +#endif /* !_UFS_MEDIATEK_H */ -- cgit v1.2.3 From 928269494c8495eee0a74c9685cc6c222e8ee0b7 Mon Sep 17 00:00:00 2001 From: Igor Belwon Date: Sat, 11 Oct 2025 21:10:07 +0200 Subject: MAINTAINERS: Add UFS to MediaTek section Add the UFS driver files to the ARM MediaTek section in MAINTAINERS. Add myself as its maintainer. Signed-off-by: Igor Belwon Link: https://patch.msgid.link/20251011-mtk-ufs-uboot-v1-4-a05f991ee150@mentallysanemainliners.org Signed-off-by: Neil Armstrong --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index ee4ab3711b3..cb4cffce23e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -410,6 +410,7 @@ ARM MEDIATEK M: Ryder Lee M: Weijie Gao M: Chunfeng Yun +M: Igor Belwon R: GSS_MTK_Uboot_upstream S: Maintained F: arch/arm/mach-mediatek/ @@ -435,6 +436,7 @@ F: drivers/spi/mtk_snfi_spi.c F: drivers/spi/mtk_spim.c F: drivers/spi/mtk_snor.c F: drivers/timer/mtk_timer.c +F: drivers/ufs/ufs-mediatek* F: drivers/usb/host/xhci-mtk.c F: drivers/usb/mtu3/ F: drivers/watchdog/mtk_wdt.c -- cgit v1.2.3 From 9b37b6001759c2727568255de1169c942b9aa185 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:21 +0100 Subject: ufs: core: Keep Makefile and Kconfig list sorted Sort the Makefile and Kconfig lists alphabetically. No functional change. 
Reviewed-by: Neil Armstrong Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-1-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/Kconfig | 38 +++++++++++++++++++------------------- drivers/ufs/Makefile | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index b030a972b98..6b980bce1e9 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -29,17 +29,6 @@ config UFS_MEDIATEK If unsure, say N. -config UFS_PCI - bool "PCI bus based UFS Controller support" - depends on PCI && UFS - help - This selects the PCI UFS Host Controller Interface. Select this if - you have UFS Host Controller with PCI Interface. - - If you have a controller with this interface, say Y here. - - If unsure, say N. - config QCOM_UFS bool "Qualcomm Host Controller driver for UFS" depends on UFS && ARCH_SNAPDRAGON @@ -62,6 +51,25 @@ config TI_J721E_UFS This selects the glue layer driver for Cadence controller present on TI's J721E devices. +config UFS_AMD_VERSAL2 + bool "AMD Versal Gen 2 UFS controller platform driver" + depends on UFS && ZYNQMP_FIRMWARE + help + This selects the AMD specific additions to UFSHCD platform driver. + UFS host on AMD needs some vendor specific configuration before accessing + the hardware. + +config UFS_PCI + bool "PCI bus based UFS Controller support" + depends on PCI && UFS + help + This selects the PCI UFS Host Controller Interface. Select this if + you have UFS Host Controller with PCI Interface. + + If you have a controller with this interface, say Y here. + + If unsure, say N. + config UFS_RENESAS bool "Renesas specific hooks to UFS controller platform driver" depends on UFS @@ -71,12 +79,4 @@ config UFS_RENESAS UFS host on Renesas needs some vendor specific configuration before accessing the hardware. 
-config UFS_AMD_VERSAL2 - bool "AMD Versal Gen 2 UFS controller platform driver" - depends on UFS && ZYNQMP_FIRMWARE - help - This selects the AMD specific additions to UFSHCD platform driver. - UFS host on AMD needs some vendor specific configuration before accessing - the hardware. - endmenu diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index bbee66caa8a..04892d75fe6 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o obj-$(CONFIG_UFS_MEDIATEK) += ufs-mediatek.o obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o +obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o obj-$(CONFIG_UFS_PCI) += ufs-pci.o obj-$(CONFIG_UFS_RENESAS) += ufs-renesas.o obj-$(CONFIG_ROCKCHIP_UFS) += ufs-rockchip.o -obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o -- cgit v1.2.3 From ea97b856179338a51cb6fd3fef07680e5e2c662a Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:22 +0100 Subject: ufs: core: Fold ufs-uclass into ufs Move the few lines of ufs-uclass.c into ufs.c and remove the ufs-uclass.c . No functional change. 
Reviewed-by: Neil Armstrong Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-2-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/Makefile | 2 +- drivers/ufs/ufs-uclass.c | 17 ----------------- drivers/ufs/ufs.c | 6 ++++++ 3 files changed, 7 insertions(+), 18 deletions(-) delete mode 100644 drivers/ufs/ufs-uclass.c diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index 04892d75fe6..9fcb53ee6af 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -3,7 +3,7 @@ # Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com # -obj-$(CONFIG_UFS) += ufs.o ufs-uclass.o +obj-$(CONFIG_UFS) += ufs.o obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o obj-$(CONFIG_UFS_MEDIATEK) += ufs-mediatek.o obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o diff --git a/drivers/ufs/ufs-uclass.c b/drivers/ufs/ufs-uclass.c deleted file mode 100644 index 334bfcfa06a..00000000000 --- a/drivers/ufs/ufs-uclass.c +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/** - * ufs-uclass.c - Universal Flash Storage (UFS) Uclass driver - * - * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com - */ - -#define LOG_CATEGORY UCLASS_UFS - -#include "ufs.h" -#include - -UCLASS_DRIVER(ufs) = { - .id = UCLASS_UFS, - .name = "ufs", - .per_device_auto = sizeof(struct ufs_hba), -}; diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c index b4f994c5bc0..6d1f9ac1d5f 100644 --- a/drivers/ufs/ufs.c +++ b/drivers/ufs/ufs.c @@ -2302,3 +2302,9 @@ U_BOOT_DRIVER(ufs_scsi) = { .name = "ufs_scsi", .ops = &ufs_ops, }; + +UCLASS_DRIVER(ufs) = { + .id = UCLASS_UFS, + .name = "ufs", + .per_device_auto = sizeof(struct ufs_hba), +}; -- cgit v1.2.3 From 1de63f1be48b6af0d2e90cdadfa9c0d3b9abf3b7 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:23 +0100 Subject: ufs: core: Rename ufs.c to ufs-uclass.c Previous commit folded existing ufs-uclass.c into ufs.c , which produced a nice and reviewable change , but 
also broke the UCLASS should be in *-uclass.c pattern. Fix it. Keep the change separate from the previous one to make this reviewable. Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-3-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/Makefile | 2 +- drivers/ufs/ufs.c | 2310 -------------------------------------------------- 2 files changed, 1 insertion(+), 2311 deletions(-) delete mode 100644 drivers/ufs/ufs.c diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index 9fcb53ee6af..4915b12e623 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -3,7 +3,7 @@ # Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com # -obj-$(CONFIG_UFS) += ufs.o +obj-$(CONFIG_UFS) += ufs-uclass.o obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o obj-$(CONFIG_UFS_MEDIATEK) += ufs-mediatek.o obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c deleted file mode 100644 index 6d1f9ac1d5f..00000000000 --- a/drivers/ufs/ufs.c +++ /dev/null @@ -1,2310 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/** - * ufs.c - Universal Flash Storage (UFS) driver - * - * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported - * to u-boot. 
- * - * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ufs.h" - -#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ - UTP_TASK_REQ_COMPL |\ - UFSHCD_ERROR_MASK) -/* maximum number of link-startup retries */ -#define DME_LINKSTARTUP_RETRIES 3 - -/* maximum number of retries for a general UIC command */ -#define UFS_UIC_COMMAND_RETRIES 3 - -/* Query request retries */ -#define QUERY_REQ_RETRIES 3 -/* Query request timeout */ -#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ - -/* maximum timeout in ms for a general UIC command */ -#define UFS_UIC_CMD_TIMEOUT 1000 -/* NOP OUT retries waiting for NOP IN response */ -#define NOP_OUT_RETRIES 10 -/* Timeout after 30 msecs if NOP OUT hangs without response */ -#define NOP_OUT_TIMEOUT 30 /* msecs */ - -/* Only use one Task Tag for all requests */ -#define TASK_TAG 0 - -/* Expose the flag value from utp_upiu_query.value */ -#define MASK_QUERY_UPIU_FLAG_LOC 0xFF - -#define MAX_PRDT_ENTRY 262144 - -/* maximum bytes per request */ -#define UFS_MAX_BYTES (128 * 256 * 1024) - -static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); -static inline void ufshcd_hba_stop(struct ufs_hba *hba); -static int ufshcd_hba_enable(struct ufs_hba *hba); - -/* - * ufshcd_wait_for_register - wait for register value to change - */ -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long timeout_ms) -{ - int err = 0; - unsigned long start = get_timer(0); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; - - while ((ufshcd_readl(hba, reg) & mask) != val) { - if (get_timer(start) > timeout_ms) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } - - return err; -} - -/** - * ufshcd_init_pwr_info - setting the POR (power 
on reset) - * values in hba power info - */ -static void ufshcd_init_pwr_info(struct ufs_hba *hba) -{ - hba->pwr_info.gear_rx = UFS_PWM_G1; - hba->pwr_info.gear_tx = UFS_PWM_G1; - hba->pwr_info.lane_rx = 1; - hba->pwr_info.lane_tx = 1; - hba->pwr_info.pwr_rx = SLOWAUTO_MODE; - hba->pwr_info.pwr_tx = SLOWAUTO_MODE; - hba->pwr_info.hs_rate = 0; -} - -/** - * ufshcd_print_pwr_info - print power params as saved in hba - * power info - */ -static void ufshcd_print_pwr_info(struct ufs_hba *hba) -{ - static const char * const names[] = { - "INVALID MODE", - "FAST MODE", - "SLOW_MODE", - "INVALID MODE", - "FASTAUTO_MODE", - "SLOWAUTO_MODE", - "INVALID MODE", - }; - - dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", - hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, - hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, - names[hba->pwr_info.pwr_rx], - names[hba->pwr_info.pwr_tx], - hba->pwr_info.hs_rate); -} - -static void ufshcd_device_reset(struct ufs_hba *hba) -{ - ufshcd_vops_device_reset(hba); -} - -/** - * ufshcd_ready_for_uic_cmd - Check if controller is ready - * to accept UIC commands - */ -static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) -{ - if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) - return true; - else - return false; -} - -/** - * ufshcd_get_uic_cmd_result - Get the UIC command result - */ -static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) -{ - return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & - MASK_UIC_COMMAND_RESULT; -} - -/** - * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command - */ -static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) -{ - return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); -} - -/** - * ufshcd_is_device_present - Check if any device connected to - * the host controller - */ -static inline bool ufshcd_is_device_present(struct ufs_hba *hba) -{ - return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & - DEVICE_PRESENT) ? 
true : false; -} - -/** - * ufshcd_send_uic_cmd - UFS Interconnect layer command API - * - */ -static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) -{ - unsigned long start = 0; - u32 intr_status; - u32 enabled_intr_status; - - if (!ufshcd_ready_for_uic_cmd(hba)) { - dev_err(hba->dev, - "Controller not ready to accept UIC commands\n"); - return -EIO; - } - - debug("sending uic command:%d\n", uic_cmd->command); - - /* Write Args */ - ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); - ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); - ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); - - /* Write UIC Cmd */ - ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, - REG_UIC_COMMAND); - - start = get_timer(0); - do { - intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - enabled_intr_status = intr_status & hba->intr_mask; - ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - - if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { - dev_err(hba->dev, - "Timedout waiting for UIC response\n"); - - return -ETIMEDOUT; - } - - if (enabled_intr_status & UFSHCD_ERROR_MASK) { - dev_err(hba->dev, "Error in status:%08x\n", - enabled_intr_status); - - return -1; - } - } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); - - uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); - uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); - - debug("Sent successfully\n"); - - return 0; -} - -int ufshcd_dme_configure_adapt(struct ufs_hba *hba, - int agreed_gear, - int adapt_val) -{ - int ret; - - if (agreed_gear < UFS_HS_G4) - adapt_val = PA_NO_ADAPT; - - ret = ufshcd_dme_set(hba, - UIC_ARG_MIB(PA_TXHSADAPTTYPE), - adapt_val); - return ret; -} - -/** - * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET - * - */ -int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, - u32 mib_val, u8 peer) -{ - struct uic_command uic_cmd = {0}; - static const char *const action[] = { - "dme-set", - 
"dme-peer-set" - }; - const char *set = action[!!peer]; - int ret; - int retries = UFS_UIC_COMMAND_RETRIES; - - uic_cmd.command = peer ? - UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; - uic_cmd.argument1 = attr_sel; - uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); - uic_cmd.argument3 = mib_val; - - do { - /* for peer attributes we retry upon failure */ - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", - set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); - } while (ret && peer && --retries); - - if (ret) - dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", - set, UIC_GET_ATTR_ID(attr_sel), mib_val, - UFS_UIC_COMMAND_RETRIES - retries); - - return ret; -} - -/** - * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET - * - */ -int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, - u32 *mib_val, u8 peer) -{ - struct uic_command uic_cmd = {0}; - static const char *const action[] = { - "dme-get", - "dme-peer-get" - }; - const char *get = action[!!peer]; - int ret; - int retries = UFS_UIC_COMMAND_RETRIES; - - uic_cmd.command = peer ? 
- UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; - uic_cmd.argument1 = attr_sel; - - do { - /* for peer attributes we retry upon failure */ - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", - get, UIC_GET_ATTR_ID(attr_sel), ret); - } while (ret && peer && --retries); - - if (ret) - dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", - get, UIC_GET_ATTR_ID(attr_sel), - UFS_UIC_COMMAND_RETRIES - retries); - - if (mib_val && !ret) - *mib_val = uic_cmd.argument3; - - return ret; -} - -static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) -{ - u32 tx_lanes, i, err = 0; - - if (!peer) - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), - &tx_lanes); - else - ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), - &tx_lanes); - for (i = 0; i < tx_lanes; i++) { - unsigned int val = UIC_ARG_MIB_SEL(TX_LCC_ENABLE, - UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)); - if (!peer) - err = ufshcd_dme_set(hba, val, 0); - else - err = ufshcd_dme_peer_set(hba, val, 0); - if (err) { - dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n", - __func__, peer, i, err); - break; - } - } - - return err; -} - -static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) -{ - return ufshcd_disable_tx_lcc(hba, true); -} - -/** - * ufshcd_dme_link_startup - Notify Unipro to perform link startup - * - */ -static int ufshcd_dme_link_startup(struct ufs_hba *hba) -{ - struct uic_command uic_cmd = {0}; - int ret; - - uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; - - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_dbg(hba->dev, - "dme-link-startup: error code %d\n", ret); - return ret; -} - -int ufshcd_dme_enable(struct ufs_hba *hba) -{ - struct uic_command uic_cmd = {0}; - int ret; - - uic_cmd.command = UIC_CMD_DME_ENABLE; - - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_dbg(hba->dev, - "dme-enable: error code %d\n", ret); - - return ret; -} - -int 
ufshcd_dme_reset(struct ufs_hba *hba) -{ - struct uic_command uic_cmd = {0}; - int ret; - - uic_cmd.command = UIC_CMD_DME_RESET; - - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_dbg(hba->dev, - "dme-reset: error code %d\n", ret); - - return ret; -} - -/** - * ufshcd_disable_intr_aggr - Disables interrupt aggregation. - * - */ -static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) -{ - ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); -} - -/** - * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY - */ -static inline int ufshcd_get_lists_status(u32 reg) -{ - return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); -} - -/** - * ufshcd_enable_run_stop_reg - Enable run-stop registers, - * When run-stop registers are set to 1, it indicates the - * host controller that it can process the requests - */ -static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) -{ - ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, - REG_UTP_TASK_REQ_LIST_RUN_STOP); - ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, - REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); -} - -/** - * ufshcd_enable_intr - enable interrupts - */ -static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) -{ - u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - u32 rw; - - if (hba->version == UFSHCI_VERSION_10) { - rw = set & INTERRUPT_MASK_RW_VER_10; - set = rw | ((set ^ intrs) & intrs); - } else { - set |= intrs; - } - - ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); - - hba->intr_mask = set; -} - -/** - * ufshcd_make_hba_operational - Make UFS controller operational - * - * To bring UFS host controller to operational state, - * 1. Enable required interrupts - * 2. Configure interrupt aggregation - * 3. Program UTRL and UTMRL base address - * 4. 
Configure run-stop-registers - * - */ -static int ufshcd_make_hba_operational(struct ufs_hba *hba) -{ - int err = 0; - u32 reg; - - /* Enable required interrupts */ - ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); - - /* Disable interrupt aggregation */ - ufshcd_disable_intr_aggr(hba); - - /* Configure UTRL and UTMRL base address registers */ - ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), - REG_UTP_TRANSFER_REQ_LIST_BASE_L); - ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), - REG_UTP_TRANSFER_REQ_LIST_BASE_H); - ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), - REG_UTP_TASK_REQ_LIST_BASE_L); - ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), - REG_UTP_TASK_REQ_LIST_BASE_H); - - /* - * Make sure base address and interrupt setup are updated before - * enabling the run/stop registers below. - */ - wmb(); - - /* - * UCRDY, UTMRLDY and UTRLRDY bits must be 1 - */ - reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); - if (!(ufshcd_get_lists_status(reg))) { - ufshcd_enable_run_stop_reg(hba); - } else { - dev_err(hba->dev, - "Host controller not ready to process requests\n"); - err = -EIO; - goto out; - } - -out: - return err; -} - -/** - * ufshcd_link_startup - Initialize unipro link startup - */ -static int ufshcd_link_startup(struct ufs_hba *hba) -{ - int ret; - int retries = DME_LINKSTARTUP_RETRIES; - - do { - ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); - - ret = ufshcd_dme_link_startup(hba); - - /* check if device is detected by inter-connect layer */ - if (!ret && !ufshcd_is_device_present(hba)) { - dev_err(hba->dev, "%s: Device not present\n", __func__); - ret = -ENXIO; - goto out; - } - - /* - * DME link lost indication is only received when link is up, - * but we can't be sure if the link is up until link startup - * succeeds. So reset the local Uni-Pro and try again. - */ - if (ret && ufshcd_hba_enable(hba)) - goto out; - } while (ret && retries--); - - if (ret) - /* failed to get the link up... 
retire */ - goto out; - - /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ - ufshcd_init_pwr_info(hba); - - if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { - ret = ufshcd_disable_device_tx_lcc(hba); - if (ret) - goto out; - } - - /* Include any host controller configuration via UIC commands */ - ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE); - if (ret) - goto out; - - /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ - ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); - ret = ufshcd_make_hba_operational(hba); -out: - if (ret) - dev_err(hba->dev, "link startup failed %d\n", ret); - - return ret; -} - -/** - * ufshcd_hba_stop - Send controller to reset state - */ -static inline void ufshcd_hba_stop(struct ufs_hba *hba) -{ - int err; - - ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); - err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, - CONTROLLER_ENABLE, CONTROLLER_DISABLE, - 10); - if (err) - dev_err(hba->dev, "%s: Controller disable failed\n", __func__); -} - -/** - * ufshcd_is_hba_active - Get controller state - */ -static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) -{ - return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) - ? false : true; -} - -/** - * ufshcd_hba_start - Start controller initialization sequence - */ -static inline void ufshcd_hba_start(struct ufs_hba *hba) -{ - ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); -} - -/** - * ufshcd_hba_enable - initialize the controller - */ -static int ufshcd_hba_enable(struct ufs_hba *hba) -{ - int retry; - - if (!ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); - - ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE); - - /* start controller initialization sequence */ - ufshcd_hba_start(hba); - - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. 
- * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. - */ - mdelay(1); - - /* wait for the host controller to complete initialization */ - retry = 10; - while (ufshcd_is_hba_active(hba)) { - if (retry) { - retry--; - } else { - dev_err(hba->dev, "Controller enable failed\n"); - return -EIO; - } - mdelay(5); - } - - /* enable UIC related interrupts */ - ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); - - ufshcd_ops_hce_enable_notify(hba, POST_CHANGE); - - return 0; -} - -/** - * ufshcd_host_memory_configure - configure local reference block with - * memory offsets - */ -static void ufshcd_host_memory_configure(struct ufs_hba *hba) -{ - struct utp_transfer_req_desc *utrdlp; - dma_addr_t cmd_desc_dma_addr; - u16 response_offset; - u16 prdt_offset; - - utrdlp = hba->utrdl; - cmd_desc_dma_addr = (dma_addr_t)hba->ucdl; - - utrdlp->command_desc_base_addr_lo = - cpu_to_le32(lower_32_bits(cmd_desc_dma_addr)); - utrdlp->command_desc_base_addr_hi = - cpu_to_le32(upper_32_bits(cmd_desc_dma_addr)); - - response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); - prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); - - utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2); - utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2); - utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); - - hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl; - hba->ucd_rsp_ptr = - (struct utp_upiu_rsp *)&hba->ucdl->response_upiu; - hba->ucd_prdt_ptr = - (struct ufshcd_sg_entry *)&hba->ucdl->prd_table; -} - -/** - * ufshcd_memory_alloc - allocate memory for host memory space data structures - */ -static int ufshcd_memory_alloc(struct ufs_hba *hba) -{ - 
/* Allocate one Transfer Request Descriptor - * Should be aligned to 1k boundary. - */ - hba->utrdl = memalign(1024, - ALIGN(sizeof(struct utp_transfer_req_desc), - ARCH_DMA_MINALIGN)); - if (!hba->utrdl) { - dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n"); - return -ENOMEM; - } - - /* Allocate one Command Descriptor - * Should be aligned to 1k boundary. - */ - hba->ucdl = memalign(1024, - ALIGN(sizeof(struct utp_transfer_cmd_desc), - ARCH_DMA_MINALIGN)); - if (!hba->ucdl) { - dev_err(hba->dev, "Command descriptor memory allocation failed\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * ufshcd_get_intr_mask - Get the interrupt bit mask - */ -static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) -{ - u32 intr_mask = 0; - - switch (hba->version) { - case UFSHCI_VERSION_10: - intr_mask = INTERRUPT_MASK_ALL_VER_10; - break; - case UFSHCI_VERSION_11: - case UFSHCI_VERSION_20: - intr_mask = INTERRUPT_MASK_ALL_VER_11; - break; - case UFSHCI_VERSION_21: - default: - intr_mask = INTERRUPT_MASK_ALL_VER_21; - break; - } - - return intr_mask; -} - -/** - * ufshcd_get_ufs_version - Get the UFS version supported by the HBA - */ -static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) -{ - return ufshcd_readl(hba, REG_UFS_VERSION); -} - -/** - * ufshcd_get_upmcrs - Get the power mode change request status - */ -static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) -{ - return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; -} - -/** - * ufshcd_cache_flush - Flush cache - * - * Flush cache in aligned address..address+size range. - */ -static void ufshcd_cache_flush(void *addr, unsigned long size) -{ - uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1); - uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN); - - flush_dcache_range(start_addr, end_addr); -} - -/** - * ufshcd_cache_invalidate - Invalidate cache - * - * Invalidate cache in aligned address..address+size range. 
- */ -static void ufshcd_cache_invalidate(void *addr, unsigned long size) -{ - uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1); - uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN); - - invalidate_dcache_range(start_addr, end_addr); -} - -/** - * ufshcd_prepare_req_desc_hdr() - Fills the requests header - * descriptor according to request - */ -static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, - u32 *upiu_flags, - enum dma_data_direction cmd_dir) -{ - struct utp_transfer_req_desc *req_desc = hba->utrdl; - u32 data_direction; - u32 dword_0; - - if (cmd_dir == DMA_FROM_DEVICE) { - data_direction = UTP_DEVICE_TO_HOST; - *upiu_flags = UPIU_CMD_FLAGS_READ; - } else if (cmd_dir == DMA_TO_DEVICE) { - data_direction = UTP_HOST_TO_DEVICE; - *upiu_flags = UPIU_CMD_FLAGS_WRITE; - } else { - data_direction = UTP_NO_DATA_TRANSFER; - *upiu_flags = UPIU_CMD_FLAGS_NONE; - } - - dword_0 = (data_direction << UPIU_DATA_DIRECTION_OFFSET) - | (0x1 << UPIU_COMMAND_TYPE_OFFSET); - - /* Enable Interrupt for command */ - dword_0 |= UTP_REQ_DESC_INT_CMD; - - /* Transfer request descriptor header fields */ - req_desc->header.dword_0 = cpu_to_le32(dword_0); - /* dword_1 is reserved, hence it is set to 0 */ - req_desc->header.dword_1 = 0; - /* - * assigning invalid value for command status. 
Controller - * updates OCS on command completion, with the command - * status - */ - req_desc->header.dword_2 = - cpu_to_le32(OCS_INVALID_COMMAND_STATUS); - /* dword_3 is reserved, hence it is set to 0 */ - req_desc->header.dword_3 = 0; - - req_desc->prd_table_length = 0; - - ufshcd_cache_flush(req_desc, sizeof(*req_desc)); -} - -static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, - u32 upiu_flags) -{ - struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; - struct ufs_query *query = &hba->dev_cmd.query; - u16 len = be16_to_cpu(query->request.upiu_req.length); - - /* Query request header */ - ucd_req_ptr->header.dword_0 = - UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ, - upiu_flags, 0, TASK_TAG); - ucd_req_ptr->header.dword_1 = - UPIU_HEADER_DWORD(0, query->request.query_func, - 0, 0); - - /* Data segment length only need for WRITE_DESC */ - if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) - ucd_req_ptr->header.dword_2 = - UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); - else - ucd_req_ptr->header.dword_2 = 0; - - /* Copy the Query Request buffer as is */ - memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE); - - /* Copy the Descriptor */ - if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) { - memcpy(ucd_req_ptr + 1, query->descriptor, len); - ufshcd_cache_flush(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr)); - } else { - ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr)); - } - - memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); - ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); -} - -static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba) -{ - struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; - - memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); - - /* command descriptor fields */ - ucd_req_ptr->header.dword_0 = - UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG); - /* clear rest of the fields of basic header */ - ucd_req_ptr->header.dword_1 = 0; - 
ucd_req_ptr->header.dword_2 = 0; - - memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); - - ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr)); - ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); -} - -/** - * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) - * for Device Management Purposes - */ -static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, - enum dev_cmd_type cmd_type) -{ - u32 upiu_flags; - int ret = 0; - - hba->dev_cmd.type = cmd_type; - - ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE); - switch (cmd_type) { - case DEV_CMD_TYPE_QUERY: - ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags); - break; - case DEV_CMD_TYPE_NOP: - ufshcd_prepare_utp_nop_upiu(hba); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) -{ - unsigned long start; - u32 intr_status; - u32 enabled_intr_status; - - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); - - /* Make sure doorbell reg is updated before reading interrupt status */ - wmb(); - - start = get_timer(0); - do { - intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - enabled_intr_status = intr_status & hba->intr_mask; - ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - - if (get_timer(start) > QUERY_REQ_TIMEOUT) { - dev_err(hba->dev, - "Timedout waiting for UTP response\n"); - - return -ETIMEDOUT; - } - - if (enabled_intr_status & UFSHCD_ERROR_MASK) { - dev_err(hba->dev, "Error in status:%08x\n", - enabled_intr_status); - - return -1; - } - } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL)); - - return 0; -} - -/** - * ufshcd_get_req_rsp - returns the TR response transaction type - */ -static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) -{ - ufshcd_cache_invalidate(ucd_rsp_ptr, sizeof(*ucd_rsp_ptr)); - - return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; -} - -/** - * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status - * - 
*/ -static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba) -{ - struct utp_transfer_req_desc *req_desc = hba->utrdl; - - ufshcd_cache_invalidate(req_desc, sizeof(*req_desc)); - - return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS; -} - -static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) -{ - return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; -} - -static int ufshcd_check_query_response(struct ufs_hba *hba) -{ - struct ufs_query_res *query_res = &hba->dev_cmd.query.response; - - /* Get the UPIU response */ - query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >> - UPIU_RSP_CODE_OFFSET; - return query_res->response; -} - -/** - * ufshcd_copy_query_response() - Copy the Query Response and the data - * descriptor - */ -static int ufshcd_copy_query_response(struct ufs_hba *hba) -{ - struct ufs_query_res *query_res = &hba->dev_cmd.query.response; - - memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); - - /* Get the descriptor */ - if (hba->dev_cmd.query.descriptor && - hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { - u8 *descp = (u8 *)hba->ucd_rsp_ptr + - GENERAL_UPIU_REQUEST_SIZE; - u16 resp_len; - u16 buf_len; - - /* data segment length */ - resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) & - MASK_QUERY_DATA_SEG_LEN; - buf_len = - be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length); - if (likely(buf_len >= resp_len)) { - memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); - } else { - dev_warn(hba->dev, - "%s: Response size is bigger than buffer\n", - __func__); - return -EINVAL; - } - } - - return 0; -} - -/** - * ufshcd_exec_dev_cmd - API for sending device management requests - */ -static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, - int timeout) -{ - int err; - int resp; - - err = ufshcd_comp_devman_upiu(hba, cmd_type); - if (err) - return err; - - err = ufshcd_send_command(hba, TASK_TAG); - if (err) - return err; - - 
err = ufshcd_get_tr_ocs(hba); - if (err) { - dev_err(hba->dev, "Error in OCS:%d\n", err); - return -EINVAL; - } - - resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); - switch (resp) { - case UPIU_TRANSACTION_NOP_IN: - break; - case UPIU_TRANSACTION_QUERY_RSP: - err = ufshcd_check_query_response(hba); - if (!err) - err = ufshcd_copy_query_response(hba); - break; - case UPIU_TRANSACTION_REJECT_UPIU: - /* TODO: handle Reject UPIU Response */ - err = -EPERM; - dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", - __func__); - break; - default: - err = -EINVAL; - dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", - __func__, resp); - } - - return err; -} - -/** - * ufshcd_init_query() - init the query response and request parameters - */ -static inline void ufshcd_init_query(struct ufs_hba *hba, - struct ufs_query_req **request, - struct ufs_query_res **response, - enum query_opcode opcode, - u8 idn, u8 index, u8 selector) -{ - *request = &hba->dev_cmd.query.request; - *response = &hba->dev_cmd.query.response; - memset(*request, 0, sizeof(struct ufs_query_req)); - memset(*response, 0, sizeof(struct ufs_query_res)); - (*request)->upiu_req.opcode = opcode; - (*request)->upiu_req.idn = idn; - (*request)->upiu_req.index = index; - (*request)->upiu_req.selector = selector; -} - -/** - * ufshcd_query_flag() - API function for sending flag query requests - */ -static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, - enum flag_idn idn, bool *flag_res) -{ - struct ufs_query_req *request = NULL; - struct ufs_query_res *response = NULL; - int err, index = 0, selector = 0; - int timeout = QUERY_REQ_TIMEOUT; - - ufshcd_init_query(hba, &request, &response, opcode, idn, index, - selector); - - switch (opcode) { - case UPIU_QUERY_OPCODE_SET_FLAG: - case UPIU_QUERY_OPCODE_CLEAR_FLAG: - case UPIU_QUERY_OPCODE_TOGGLE_FLAG: - request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; - break; - case UPIU_QUERY_OPCODE_READ_FLAG: - 
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; - if (!flag_res) { - /* No dummy reads */ - dev_err(hba->dev, "%s: Invalid argument for read request\n", - __func__); - err = -EINVAL; - goto out; - } - break; - default: - dev_err(hba->dev, - "%s: Expected query flag opcode but got = %d\n", - __func__, opcode); - err = -EINVAL; - goto out; - } - - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); - - if (err) { - dev_err(hba->dev, - "%s: Sending flag query for idn %d failed, err = %d\n", - __func__, idn, err); - goto out; - } - - if (flag_res) - *flag_res = (be32_to_cpu(response->upiu_res.value) & - MASK_QUERY_UPIU_FLAG_LOC) & 0x1; - -out: - return err; -} - -static int ufshcd_query_flag_retry(struct ufs_hba *hba, - enum query_opcode opcode, - enum flag_idn idn, bool *flag_res) -{ - int ret; - int retries; - - for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { - ret = ufshcd_query_flag(hba, opcode, idn, flag_res); - if (ret) - dev_dbg(hba->dev, - "%s: failed with error %d, retries %d\n", - __func__, ret, retries); - else - break; - } - - if (ret) - dev_err(hba->dev, - "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", - __func__, opcode, idn, ret, retries); - return ret; -} - -/** - * ufshcd_query_attr - API function for sending attribute requests - * @hba: per-adapter instance - * @opcode: attribute opcode - * @idn: attribute idn to access - * @index: index field - * @selector: selector field - * @attr_val: the attribute value after the query request completes - * - * Return: 0 for success, non-zero in case of failure. 
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	/* attr_val doubles as input (write) and output (read) buffer */
	if (!attr_val) {
		dev_err(hba->dev,
			"%s: attribute value required for opcode 0x%x\n",
			__func__, opcode);
		return -EINVAL;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		/* attribute values travel big-endian in the Query UPIU */
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query attr opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	/* convert the device's big-endian response back to host order */
	*attr_val = be32_to_cpu(response->upiu_res.value);

out:
	return err;
}

/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Return: 0 for success, non-zero in case of failure.
- */ -int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, - enum attr_idn idn, u8 index, u8 selector, - u32 *attr_val) -{ - int ret = 0; - u32 retries; - - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { - ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val); - if (ret) - dev_dbg(hba->dev, - "%s: failed with error %d, retries %d\n", - __func__, ret, retries); - else - break; - } - - if (ret) - dev_err(hba->dev, - "%s: query attribute, idn %d, failed with error %d after %d retries\n", - __func__, idn, ret, QUERY_REQ_RETRIES); - return ret; -} - -static int __ufshcd_query_descriptor(struct ufs_hba *hba, - enum query_opcode opcode, - enum desc_idn idn, u8 index, u8 selector, - u8 *desc_buf, int *buf_len) -{ - struct ufs_query_req *request = NULL; - struct ufs_query_res *response = NULL; - int err; - - if (!desc_buf) { - dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", - __func__, opcode); - err = -EINVAL; - goto out; - } - - if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { - dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", - __func__, *buf_len); - err = -EINVAL; - goto out; - } - - ufshcd_init_query(hba, &request, &response, opcode, idn, index, - selector); - hba->dev_cmd.query.descriptor = desc_buf; - request->upiu_req.length = cpu_to_be16(*buf_len); - - switch (opcode) { - case UPIU_QUERY_OPCODE_WRITE_DESC: - request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; - break; - case UPIU_QUERY_OPCODE_READ_DESC: - request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; - break; - default: - dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n", - __func__, opcode); - err = -EINVAL; - goto out; - } - - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); - - if (err) { - dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", - __func__, opcode, idn, index, err); - goto out; - } - - 
hba->dev_cmd.query.descriptor = NULL; - *buf_len = be16_to_cpu(response->upiu_res.length); - -out: - return err; -} - -/** - * ufshcd_query_descriptor_retry - API function for sending descriptor requests - */ -static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, - enum desc_idn idn, u8 index, u8 selector, - u8 *desc_buf, int *buf_len) -{ - int err; - int retries; - - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { - err = __ufshcd_query_descriptor(hba, opcode, idn, index, - selector, desc_buf, buf_len); - if (!err || err == -EINVAL) - break; - } - - return err; -} - -/** - * ufshcd_read_desc_length - read the specified descriptor length from header - */ -static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id, - int desc_index, int *desc_length) -{ - int ret; - u8 header[QUERY_DESC_HDR_SIZE]; - int header_len = QUERY_DESC_HDR_SIZE; - - if (desc_id >= QUERY_DESC_IDN_MAX) - return -EINVAL; - - ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, header, - &header_len); - - if (ret) { - dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n", - __func__, desc_id); - return ret; - } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { - dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n", - __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], - desc_id); - ret = -EINVAL; - } - - *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; - - return ret; -} - -static void ufshcd_init_desc_sizes(struct ufs_hba *hba) -{ - int err; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, - &hba->desc_size.dev_desc); - if (err) - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, - &hba->desc_size.pwr_desc); - if (err) - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, - 
&hba->desc_size.interc_desc); - if (err) - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, - &hba->desc_size.conf_desc); - if (err) - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, - &hba->desc_size.unit_desc); - if (err) - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, - &hba->desc_size.geom_desc); - if (err) - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, - &hba->desc_size.hlth_desc); - if (err) - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; -} - -/** - * ufshcd_map_desc_id_to_length - map descriptor IDN to its length - * - */ -static int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, - int *desc_len) -{ - switch (desc_id) { - case QUERY_DESC_IDN_DEVICE: - *desc_len = hba->desc_size.dev_desc; - break; - case QUERY_DESC_IDN_POWER: - *desc_len = hba->desc_size.pwr_desc; - break; - case QUERY_DESC_IDN_GEOMETRY: - *desc_len = hba->desc_size.geom_desc; - break; - case QUERY_DESC_IDN_CONFIGURATION: - *desc_len = hba->desc_size.conf_desc; - break; - case QUERY_DESC_IDN_UNIT: - *desc_len = hba->desc_size.unit_desc; - break; - case QUERY_DESC_IDN_INTERCONNECT: - *desc_len = hba->desc_size.interc_desc; - break; - case QUERY_DESC_IDN_STRING: - *desc_len = QUERY_DESC_MAX_SIZE; - break; - case QUERY_DESC_IDN_HEALTH: - *desc_len = hba->desc_size.hlth_desc; - break; - case QUERY_DESC_IDN_RFU_0: - case QUERY_DESC_IDN_RFU_1: - *desc_len = 0; - break; - default: - *desc_len = 0; - return -EINVAL; - } - return 0; -} - -/** - * ufshcd_read_desc_param - read the specified descriptor parameter - * - */ -static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, - int desc_index, u8 param_offset, - u8 *param_read_buf, u8 
param_size) -{ - int ret; - u8 *desc_buf; - int buff_len; - bool is_kmalloc = true; - - /* Safety check */ - if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) - return -EINVAL; - - /* Get the max length of descriptor from structure filled up at probe - * time. - */ - ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); - - /* Sanity checks */ - if (ret || !buff_len) { - dev_err(hba->dev, "%s: Failed to get full descriptor length\n", - __func__); - return ret; - } - - /* Check whether we need temp memory */ - if (param_offset != 0 || param_size < buff_len) { - desc_buf = kmalloc(buff_len, GFP_KERNEL); - if (!desc_buf) - return -ENOMEM; - } else { - desc_buf = param_read_buf; - is_kmalloc = false; - } - - /* Request for full descriptor */ - ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); - - if (ret) { - dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", - __func__, desc_id, desc_index, param_offset, ret); - goto out; - } - - /* Sanity check */ - if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { - dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", - __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); - ret = -EINVAL; - goto out; - } - - /* Check wherher we will not copy more data, than available */ - if (is_kmalloc && param_size > buff_len) - param_size = buff_len; - - if (is_kmalloc) - memcpy(param_read_buf, &desc_buf[param_offset], param_size); -out: - if (is_kmalloc) - kfree(desc_buf); - return ret; -} - -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(uint8_t *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - -/** - * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power - * state) and waits for it to take effect. 
- * - */ -static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) -{ - unsigned long start = 0; - u8 status; - int ret; - - ret = ufshcd_send_uic_cmd(hba, cmd); - if (ret) { - dev_err(hba->dev, - "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", - cmd->command, cmd->argument3, ret); - - return ret; - } - - start = get_timer(0); - do { - status = ufshcd_get_upmcrs(hba); - if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { - dev_err(hba->dev, - "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", - cmd->command, status); - ret = (status != PWR_OK) ? status : -1; - break; - } - } while (status != PWR_LOCAL); - - return ret; -} - -/** - * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change - * using DME_SET primitives. - */ -static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) -{ - struct uic_command uic_cmd = {0}; - int ret; - - uic_cmd.command = UIC_CMD_DME_SET; - uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); - uic_cmd.argument3 = mode; - ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); - - return ret; -} - -static -void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba, - struct scsi_cmd *pccb, u32 upiu_flags) -{ - struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; - unsigned int cdb_len; - - /* command descriptor fields */ - ucd_req_ptr->header.dword_0 = - UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags, - pccb->lun, TASK_TAG); - ucd_req_ptr->header.dword_1 = - UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); - - /* Total EHS length and Data segment length will be zero */ - ucd_req_ptr->header.dword_2 = 0; - - ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen); - - cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); - memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len); - - memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); - ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr)); - ufshcd_cache_flush(hba->ucd_rsp_ptr, 
sizeof(*hba->ucd_rsp_ptr)); -} - -static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry, - unsigned char *buf, ulong len) -{ - entry->size = cpu_to_le32(len) | GENMASK(1, 0); - entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf)); - entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf)); -} - -static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb) -{ - struct utp_transfer_req_desc *req_desc = hba->utrdl; - struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr; - ulong datalen = pccb->datalen; - int table_length; - u8 *buf; - int i; - - if (!datalen) { - req_desc->prd_table_length = 0; - ufshcd_cache_flush(req_desc, sizeof(*req_desc)); - return; - } - - table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY); - buf = pccb->pdata; - i = table_length; - while (--i) { - prepare_prdt_desc(&prd_table[table_length - i - 1], buf, - MAX_PRDT_ENTRY - 1); - buf += MAX_PRDT_ENTRY; - datalen -= MAX_PRDT_ENTRY; - } - - prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1); - - req_desc->prd_table_length = table_length; - ufshcd_cache_flush(prd_table, sizeof(*prd_table) * table_length); - ufshcd_cache_flush(req_desc, sizeof(*req_desc)); -} - -static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb) -{ - struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); - u32 upiu_flags; - int ocs, result = 0; - u8 scsi_status; - - ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir); - ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags); - prepare_prdt_table(hba, pccb); - - ufshcd_cache_flush(pccb->pdata, pccb->datalen); - - ufshcd_send_command(hba, TASK_TAG); - - ufshcd_cache_invalidate(pccb->pdata, pccb->datalen); - - ocs = ufshcd_get_tr_ocs(hba); - switch (ocs) { - case OCS_SUCCESS: - result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); - switch (result) { - case UPIU_TRANSACTION_RESPONSE: - result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr); - - scsi_status = result & 
MASK_SCSI_STATUS; - if (scsi_status) - return -EINVAL; - - break; - case UPIU_TRANSACTION_REJECT_UPIU: - /* TODO: handle Reject UPIU Response */ - dev_err(hba->dev, - "Reject UPIU not fully implemented\n"); - return -EINVAL; - default: - dev_err(hba->dev, - "Unexpected request response code = %x\n", - result); - return -EINVAL; - } - break; - default: - dev_err(hba->dev, "OCS error from controller = %x\n", ocs); - return -EINVAL; - } - - return 0; -} - -static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, - int desc_index, u8 *buf, u32 size) -{ - return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); -} - -static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) -{ - return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); -} - -/** - * ufshcd_read_string_desc - read string descriptor - * - */ -static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) -{ - int err = 0; - - err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf, - size); - - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", - __func__, QUERY_REQ_RETRIES, err); - goto out; - } - - if (ascii) { - int desc_len; - int ascii_len; - int i; - u8 *buff_ascii; - - desc_len = buf[0]; - /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; - goto out; - } - - /* - * the descriptor contains string in UTF16 format - * we need to convert to utf-8 so it can be displayed - */ - utf16_to_utf8(buff_ascii, - (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len); - - /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); - - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); - } -out: - return err; -} - -static int ufs_get_device_desc(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) -{ - int err; - size_t buff_len; - u8 model_index; - u8 *desc_buf; - - buff_len = max_t(size_t, hba->desc_size.dev_desc, - QUERY_DESC_MAX_SIZE + 1); - desc_buf = kmalloc(buff_len, GFP_KERNEL); - if (!desc_buf) { - err = -ENOMEM; - goto out; - } - - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); - if (err) { - dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", - __func__, err); - goto out; - } - - /* - * getting vendor (manufacturerID) and Bank Index in big endian - * format - */ - dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | - desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; - - model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - /* Zero-pad entire buffer for string termination. 
*/ - memset(desc_buf, 0, buff_len); - - err = ufshcd_read_string_desc(hba, model_index, desc_buf, - QUERY_DESC_MAX_SIZE, true/*ASCII*/); - if (err) { - dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", - __func__, err); - goto out; - } - - desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); - - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; - -out: - kfree(desc_buf); - return err; -} - -struct ufs_ref_clk { - unsigned long freq_hz; - enum ufs_ref_clk_freq val; -}; - -static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { - {19200000, REF_CLK_FREQ_19_2_MHZ}, - {26000000, REF_CLK_FREQ_26_MHZ}, - {38400000, REF_CLK_FREQ_38_4_MHZ}, - {52000000, REF_CLK_FREQ_52_MHZ}, - {0, REF_CLK_FREQ_INVAL}, -}; - -static enum ufs_ref_clk_freq -ufs_get_bref_clk_from_hz(unsigned long freq) -{ - int i; - - for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) - if (ufs_ref_clk_freqs[i].freq_hz == freq) - return ufs_ref_clk_freqs[i].val; - - return REF_CLK_FREQ_INVAL; -} - -enum ufs_ref_clk_freq ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) -{ - unsigned long freq; - - freq = clk_get_rate(refclk); - return ufs_get_bref_clk_from_hz(freq); -} - -static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) -{ - int err; - struct clk *ref_clk; - u32 host_ref_clk_freq; - u32 dev_ref_clk_freq; - - /* get ref_clk */ - ref_clk = devm_clk_get(hba->dev, "ref_clk"); - if (IS_ERR((const void *)ref_clk)) { - err = PTR_ERR(ref_clk); - goto out; - } - - host_ref_clk_freq = ufshcd_parse_dev_ref_clk_freq(hba, ref_clk); - if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) - dev_err(hba->dev, - "invalid ref_clk setting = %ld\n", clk_get_rate(ref_clk)); - - if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) - goto out; - - err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, - QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, 
&dev_ref_clk_freq); - - if (err) { - dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", err); - goto out; - } - - if (dev_ref_clk_freq == host_ref_clk_freq) - goto out; /* nothing to update */ - - err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, - QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &host_ref_clk_freq); - - if (err) { - dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", - ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); - goto out; - } - - dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", - ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); - -out: - return err; -} - -/** - * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device - */ -static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) -{ - struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; - - if (hba->max_pwr_info.is_valid) - return 0; - - if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { - pwr_info->pwr_tx = FASTAUTO_MODE; - pwr_info->pwr_rx = FASTAUTO_MODE; - } else { - pwr_info->pwr_tx = FAST_MODE; - pwr_info->pwr_rx = FAST_MODE; - } - pwr_info->hs_rate = PA_HS_MODE_B; - - /* Get the connected lane count */ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), - &pwr_info->lane_rx); - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), - &pwr_info->lane_tx); - - if (!pwr_info->lane_rx || !pwr_info->lane_tx) { - dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", - __func__, pwr_info->lane_rx, pwr_info->lane_tx); - return -EINVAL; - } - - /* - * First, get the maximum gears of HS speed. - * If a zero value, it means there is no HSGEAR capability. - * Then, get the maximum gears of PWM speed. 
- */ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); - if (!pwr_info->gear_rx) { - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), - &pwr_info->gear_rx); - if (!pwr_info->gear_rx) { - dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", - __func__, pwr_info->gear_rx); - return -EINVAL; - } - pwr_info->pwr_rx = SLOW_MODE; - } - - ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), - &pwr_info->gear_tx); - if (!pwr_info->gear_tx) { - ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), - &pwr_info->gear_tx); - if (!pwr_info->gear_tx) { - dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", - __func__, pwr_info->gear_tx); - return -EINVAL; - } - pwr_info->pwr_tx = SLOW_MODE; - } - - hba->max_pwr_info.is_valid = true; - return ufshcd_ops_get_max_pwr_mode(hba, &hba->max_pwr_info); -} - -static int ufshcd_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode) -{ - int ret; - - /* if already configured to the requested pwr_mode */ - if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && - pwr_mode->gear_tx == hba->pwr_info.gear_tx && - pwr_mode->lane_rx == hba->pwr_info.lane_rx && - pwr_mode->lane_tx == hba->pwr_info.lane_tx && - pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && - pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && - pwr_mode->hs_rate == hba->pwr_info.hs_rate) { - dev_dbg(hba->dev, "%s: power already configured\n", __func__); - return 0; - } - - /* - * Configure attributes for power mode change with below. 
- * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, - * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, - * - PA_HSSERIES - */ - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), - pwr_mode->lane_rx); - if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE) - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); - else - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); - - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), - pwr_mode->lane_tx); - if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE) - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); - else - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); - - if (pwr_mode->pwr_rx == FASTAUTO_MODE || - pwr_mode->pwr_tx == FASTAUTO_MODE || - pwr_mode->pwr_rx == FAST_MODE || - pwr_mode->pwr_tx == FAST_MODE) - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), - pwr_mode->hs_rate); - - ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | - pwr_mode->pwr_tx); - - if (ret) { - dev_err(hba->dev, - "%s: power mode change failed %d\n", __func__, ret); - - return ret; - } - - /* Copy new Power Mode to power info */ - memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); - - return ret; -} - -/** - * ufshcd_verify_dev_init() - Verify device initialization - * - */ -static int ufshcd_verify_dev_init(struct ufs_hba *hba) -{ - int retries; - int err; - - for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, - NOP_OUT_TIMEOUT); - if (!err || err == -ETIMEDOUT) - break; - - dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); - } - - if (err) - dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); - - return err; -} - -/** - * ufshcd_complete_dev_init() - checks device readiness - */ -static int ufshcd_complete_dev_init(struct 
ufs_hba *hba) -{ - int i; - int err; - bool flag_res = 1; - - err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, NULL); - if (err) { - dev_err(hba->dev, - "%s setting fDeviceInit flag failed with error %d\n", - __func__, err); - goto out; - } - - /* poll for max. 1000 iterations for fDeviceInit flag to clear */ - for (i = 0; i < 1000 && !err && flag_res; i++) - err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, - &flag_res); - - if (err) - dev_err(hba->dev, - "%s reading fDeviceInit flag failed with error %d\n", - __func__, err); - else if (flag_res) - dev_err(hba->dev, - "%s fDeviceInit was not cleared by the device\n", - __func__); - -out: - return err; -} - -static void ufshcd_def_desc_sizes(struct ufs_hba *hba) -{ - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; -} - -static int ufs_start(struct ufs_hba *hba) -{ - struct ufs_dev_desc card = {0}; - int ret; - - ret = ufshcd_link_startup(hba); - if (ret) - return ret; - - ret = ufshcd_verify_dev_init(hba); - if (ret) - return ret; - - ret = ufshcd_complete_dev_init(hba); - if (ret) - return ret; - - /* Init check for device descriptor sizes */ - ufshcd_init_desc_sizes(hba); - - ret = ufs_get_device_desc(hba, &card); - if (ret) { - dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", - __func__, ret); - - return ret; - } - - ufshcd_set_dev_ref_clk(hba); - - if (ufshcd_get_max_pwr_mode(hba)) { - dev_err(hba->dev, - "%s: Failed getting max supported power mode\n", - __func__); - } else { - ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - - return ret; - } - - debug("UFS Device %s is up!\n", hba->dev->name); - ufshcd_print_pwr_info(hba); - } - - return 0; -} - -int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) -{ - struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); - struct scsi_plat *scsi_plat; - struct udevice *scsi_dev; - void __iomem *mmio_base; - int err; - - device_find_first_child(ufs_dev, &scsi_dev); - if (!scsi_dev) - return -ENODEV; - - scsi_plat = dev_get_uclass_plat(scsi_dev); - scsi_plat->max_id = UFSHCD_MAX_ID; - scsi_plat->max_lun = UFS_MAX_LUNS; - scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; - - hba->dev = ufs_dev; - hba->ops = hba_ops; - - if (device_is_on_pci_bus(ufs_dev)) { - mmio_base = dm_pci_map_bar(ufs_dev, PCI_BASE_ADDRESS_0, 0, 0, - PCI_REGION_TYPE, PCI_REGION_MEM); - } else { - mmio_base = dev_read_addr_ptr(ufs_dev); - } - hba->mmio_base = mmio_base; - - /* Set descriptor lengths to specification defaults */ - ufshcd_def_desc_sizes(hba); - - ufshcd_ops_init(hba); - - /* Read capabilities registers */ - hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; - - /* Get UFS version supported by the controller */ - hba->version = ufshcd_get_ufs_version(hba); - if (hba->version != UFSHCI_VERSION_10 && - hba->version != UFSHCI_VERSION_11 && - hba->version != UFSHCI_VERSION_20 && - hba->version != UFSHCI_VERSION_21 && - hba->version != UFSHCI_VERSION_30 && - hba->version != UFSHCI_VERSION_31 && - hba->version != UFSHCI_VERSION_40) - dev_err(hba->dev, "invalid UFS 
version 0x%x\n", - hba->version); - - /* Get Interrupt bit mask per version */ - hba->intr_mask = ufshcd_get_intr_mask(hba); - - /* Allocate memory for host memory space */ - err = ufshcd_memory_alloc(hba); - if (err) { - dev_err(hba->dev, "Memory allocation failed\n"); - return err; - } - - /* Configure Local data structures */ - ufshcd_host_memory_configure(hba); - - /* - * In order to avoid any spurious interrupt immediately after - * registering UFS controller interrupt handler, clear any pending UFS - * interrupt status and disable all the UFS interrupts. - */ - ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), - REG_INTERRUPT_STATUS); - ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); - - mb(); /* flush previous writes */ - - /* Reset the attached device */ - ufshcd_device_reset(hba); - - err = ufshcd_hba_enable(hba); - if (err) { - dev_err(hba->dev, "Host controller enable failed\n"); - return err; - } - - err = ufs_start(hba); - if (err) - return err; - - return 0; -} - -int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp) -{ - int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi", - scsi_devp); - - return ret; -} - -#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) -static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state) -{ -#ifdef CONFIG_PHYS_64BIT - struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); - uintptr_t ubuf = (uintptr_t)state->user_buffer; - size_t len = state->len_aligned; - - /* Check if below 32bit boundary */ - if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) && - ((ubuf >> 32) || (ubuf + len) >> 32)) { - dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n", - ubuf, ubuf + len); - return 0; - } -#endif - return 1; -} -#endif /* CONFIG_BOUNCE_BUFFER */ - -static struct scsi_ops ufs_ops = { - .exec = ufs_scsi_exec, -#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) - .buffer_aligned = ufs_scsi_buffer_aligned, -#endif /* CONFIG_BOUNCE_BUFFER */ -}; - -int ufs_probe_dev(int index) -{ - 
struct udevice *dev; - - return uclass_get_device(UCLASS_UFS, index, &dev); -} - -int ufs_probe(void) -{ - struct udevice *dev; - int ret, i; - - for (i = 0;; i++) { - ret = uclass_get_device(UCLASS_UFS, i, &dev); - if (ret == -ENODEV) - break; - } - - return 0; -} - -U_BOOT_DRIVER(ufs_scsi) = { - .id = UCLASS_SCSI, - .name = "ufs_scsi", - .ops = &ufs_ops, -}; - -UCLASS_DRIVER(ufs) = { - .id = UCLASS_UFS, - .name = "ufs", - .per_device_auto = sizeof(struct ufs_hba), -}; -- cgit v1.2.3 From 6104ae290ef159fbe2be1025f86b0584ad6beb5d Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:24 +0100 Subject: ufs: amd-versal2: Fix indent Fix indent, use tabs. No functional change. Reviewed-by: Neil Armstrong Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-4-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/ufs-amd-versal2.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/ufs/ufs-amd-versal2.c b/drivers/ufs/ufs-amd-versal2.c index 896dda2de4e..89931921ff9 100644 --- a/drivers/ufs/ufs-amd-versal2.c +++ b/drivers/ufs/ufs-amd-versal2.c @@ -330,7 +330,7 @@ static int ufs_versal2_init(struct ufs_hba *hba) return PTR_ERR(priv->rstphy); } - ret = zynqmp_pm_ufs_cal_reg(&cal); + ret = zynqmp_pm_ufs_cal_reg(&cal); if (ret) return ret; @@ -567,9 +567,9 @@ static const struct udevice_id ufs_versal2_ids[] = { }; U_BOOT_DRIVER(ufs_versal2_pltfm) = { - .name = "ufs-versal2-pltfm", - .id = UCLASS_UFS, - .of_match = ufs_versal2_ids, - .probe = ufs_versal2_probe, - .bind = ufs_versal2_bind, + .name = "ufs-versal2-pltfm", + .id = UCLASS_UFS, + .of_match = ufs_versal2_ids, + .probe = ufs_versal2_probe, + .bind = ufs_versal2_bind, }; -- cgit v1.2.3 From 067c1b03328267961816c76ff474c7312917409c Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:25 +0100 Subject: ufs: Call ufs_scsi_bind() from uclass .post_bind Instead of duplicating the ufs_scsi_bind() call in 
every driver, call it from UFS uclass .post_bind callback for every driver in one place. While doing so, inline ufs_scsi_bind() directly into ufs_post_bind() as trivial device_bind_driver() call. Reviewed-by: Neil Armstrong Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-5-marek.vasut+renesas@mailbox.org [narmstrong: also updated the rockchip and mediatek drivers] Signed-off-by: Neil Armstrong --- drivers/ufs/cdns-platform.c | 8 - drivers/ufs/ufs-amd-versal2.c | 8 - drivers/ufs/ufs-mediatek.c | 8 - drivers/ufs/ufs-pci.c | 8 - drivers/ufs/ufs-qcom.c | 8 - drivers/ufs/ufs-renesas.c | 8 - drivers/ufs/ufs-rockchip.c | 8 - drivers/ufs/ufs-uclass.c | 2308 +++++++++++++++++++++++++++++++++++++++++ 8 files changed, 2308 insertions(+), 56 deletions(-) create mode 100644 drivers/ufs/ufs-uclass.c diff --git a/drivers/ufs/cdns-platform.c b/drivers/ufs/cdns-platform.c index 510a6a6aa5d..87d9c5bad79 100644 --- a/drivers/ufs/cdns-platform.c +++ b/drivers/ufs/cdns-platform.c @@ -101,13 +101,6 @@ static int cdns_ufs_pltfm_probe(struct udevice *dev) return err; } -static int cdns_ufs_pltfm_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static const struct udevice_id cdns_ufs_pltfm_ids[] = { { .compatible = "cdns,ufshc-m31-16nm", @@ -120,5 +113,4 @@ U_BOOT_DRIVER(cdns_ufs_pltfm) = { .id = UCLASS_UFS, .of_match = cdns_ufs_pltfm_ids, .probe = cdns_ufs_pltfm_probe, - .bind = cdns_ufs_pltfm_bind, }; diff --git a/drivers/ufs/ufs-amd-versal2.c b/drivers/ufs/ufs-amd-versal2.c index 89931921ff9..bf23439e59d 100644 --- a/drivers/ufs/ufs-amd-versal2.c +++ b/drivers/ufs/ufs-amd-versal2.c @@ -552,13 +552,6 @@ static int ufs_versal2_probe(struct udevice *dev) return ret; } -static int ufs_versal2_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static const struct udevice_id ufs_versal2_ids[] = { { .compatible = "amd,versal2-ufs", @@ -571,5 +564,4 @@ 
U_BOOT_DRIVER(ufs_versal2_pltfm) = { .id = UCLASS_UFS, .of_match = ufs_versal2_ids, .probe = ufs_versal2_probe, - .bind = ufs_versal2_bind, }; diff --git a/drivers/ufs/ufs-mediatek.c b/drivers/ufs/ufs-mediatek.c index 10e7990d6bb..e860d765eea 100644 --- a/drivers/ufs/ufs-mediatek.c +++ b/drivers/ufs/ufs-mediatek.c @@ -381,13 +381,6 @@ static int ufs_mtk_probe(struct udevice *dev) return 0; } -static int ufs_mtk_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static const struct udevice_id ufs_mtk_ids[] = { { .compatible = "mediatek,mt6878-ufshci" }, {}, @@ -398,6 +391,5 @@ U_BOOT_DRIVER(mediatek_ufshci) = { .id = UCLASS_UFS, .of_match = ufs_mtk_ids, .probe = ufs_mtk_probe, - .bind = ufs_mtk_bind, .priv_auto = sizeof(struct ufs_mtk_host), }; diff --git a/drivers/ufs/ufs-pci.c b/drivers/ufs/ufs-pci.c index 871f3f50f5c..5b9c72a695d 100644 --- a/drivers/ufs/ufs-pci.c +++ b/drivers/ufs/ufs-pci.c @@ -11,13 +11,6 @@ #include #include "ufs.h" -static int ufs_pci_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static int ufs_pci_probe(struct udevice *dev) { int err; @@ -32,7 +25,6 @@ static int ufs_pci_probe(struct udevice *dev) U_BOOT_DRIVER(ufs_pci) = { .name = "ufs_pci", .id = UCLASS_UFS, - .bind = ufs_pci_bind, .probe = ufs_pci_probe, }; diff --git a/drivers/ufs/ufs-qcom.c b/drivers/ufs/ufs-qcom.c index 843585726c7..9f0a6940d98 100644 --- a/drivers/ufs/ufs-qcom.c +++ b/drivers/ufs/ufs-qcom.c @@ -648,13 +648,6 @@ static int ufs_qcom_probe(struct udevice *dev) return 0; } -static int ufs_qcom_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static const struct udevice_id ufs_qcom_ids[] = { { .compatible = "qcom,ufshc" }, {}, @@ -665,6 +658,5 @@ U_BOOT_DRIVER(qcom_ufshcd) = { .id = UCLASS_UFS, .of_match = ufs_qcom_ids, .probe = ufs_qcom_probe, - .bind = ufs_qcom_bind, .priv_auto = sizeof(struct 
ufs_qcom_priv), }; diff --git a/drivers/ufs/ufs-renesas.c b/drivers/ufs/ufs-renesas.c index ae05bdc8102..0b74b39dc2a 100644 --- a/drivers/ufs/ufs-renesas.c +++ b/drivers/ufs/ufs-renesas.c @@ -351,13 +351,6 @@ static struct ufs_hba_ops ufs_renesas_vops = { .hce_enable_notify = ufs_renesas_hce_enable_notify, }; -static int ufs_renesas_pltfm_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - static int ufs_renesas_pltfm_probe(struct udevice *dev) { struct ufs_renesas_priv *priv = dev_get_priv(dev); @@ -405,7 +398,6 @@ U_BOOT_DRIVER(ufs_renesas) = { .name = "ufs-renesas", .id = UCLASS_UFS, .of_match = ufs_renesas_pltfm_ids, - .bind = ufs_renesas_pltfm_bind, .probe = ufs_renesas_pltfm_probe, .remove = ufs_renesas_pltfm_remove, .priv_auto = sizeof(struct ufs_renesas_priv), diff --git a/drivers/ufs/ufs-rockchip.c b/drivers/ufs/ufs-rockchip.c index aa924e5cedc..0384244387d 100644 --- a/drivers/ufs/ufs-rockchip.c +++ b/drivers/ufs/ufs-rockchip.c @@ -194,18 +194,10 @@ static int ufs_rockchip_probe(struct udevice *dev) return err; } -static int ufs_rockchip_bind(struct udevice *dev) -{ - struct udevice *scsi_dev; - - return ufs_scsi_bind(dev, &scsi_dev); -} - U_BOOT_DRIVER(rockchip_ufs) = { .name = "ufshcd-rockchip", .id = UCLASS_UFS, .of_match = ufs_rockchip_of_match, .probe = ufs_rockchip_probe, - .bind = ufs_rockchip_bind, .priv_auto = sizeof(struct ufs_rockchip_host), }; diff --git a/drivers/ufs/ufs-uclass.c b/drivers/ufs/ufs-uclass.c new file mode 100644 index 00000000000..3c8e4299259 --- /dev/null +++ b/drivers/ufs/ufs-uclass.c @@ -0,0 +1,2308 @@ +// SPDX-License-Identifier: GPL-2.0+ +/** + * ufs.c - Universal Flash Storage (UFS) driver + * + * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported + * to u-boot. 
+ * + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ufs.h" + +#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ + UTP_TASK_REQ_COMPL |\ + UFSHCD_ERROR_MASK) +/* maximum number of link-startup retries */ +#define DME_LINKSTARTUP_RETRIES 3 + +/* maximum number of retries for a general UIC command */ +#define UFS_UIC_COMMAND_RETRIES 3 + +/* Query request retries */ +#define QUERY_REQ_RETRIES 3 +/* Query request timeout */ +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ + +/* maximum timeout in ms for a general UIC command */ +#define UFS_UIC_CMD_TIMEOUT 1000 +/* NOP OUT retries waiting for NOP IN response */ +#define NOP_OUT_RETRIES 10 +/* Timeout after 30 msecs if NOP OUT hangs without response */ +#define NOP_OUT_TIMEOUT 30 /* msecs */ + +/* Only use one Task Tag for all requests */ +#define TASK_TAG 0 + +/* Expose the flag value from utp_upiu_query.value */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + +#define MAX_PRDT_ENTRY 262144 + +/* maximum bytes per request */ +#define UFS_MAX_BYTES (128 * 256 * 1024) + +static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); +static inline void ufshcd_hba_stop(struct ufs_hba *hba); +static int ufshcd_hba_enable(struct ufs_hba *hba); + +/* + * ufshcd_wait_for_register - wait for register value to change + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long timeout_ms) +{ + int err = 0; + unsigned long start = get_timer(0); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + if (get_timer(start) > timeout_ms) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + +/** + * ufshcd_init_pwr_info - setting the POR (power 
on reset) + * values in hba power info + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + "SLOWAUTO_MODE", + "INVALID MODE", + }; + + dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + +static void ufshcd_device_reset(struct ufs_hba *hba) +{ + ufshcd_vops_device_reset(hba); +} + +/** + * ufshcd_ready_for_uic_cmd - Check if controller is ready + * to accept UIC commands + */ +static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) +{ + if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) + return true; + else + return false; +} + +/** + * ufshcd_get_uic_cmd_result - Get the UIC command result + */ +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & + MASK_UIC_COMMAND_RESULT; +} + +/** + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command + */ +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); +} + +/** + * ufshcd_is_device_present - Check if any device connected to + * the host controller + */ +static inline bool ufshcd_is_device_present(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & + DEVICE_PRESENT) ? 
true : false; +} + +/** + * ufshcd_send_uic_cmd - UFS Interconnect layer command API + * + */ +static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + unsigned long start = 0; + u32 intr_status; + u32 enabled_intr_status; + + if (!ufshcd_ready_for_uic_cmd(hba)) { + dev_err(hba->dev, + "Controller not ready to accept UIC commands\n"); + return -EIO; + } + + debug("sending uic command:%d\n", uic_cmd->command); + + /* Write Args */ + ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + + /* Write UIC Cmd */ + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); + + start = get_timer(0); + do { + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = intr_status & hba->intr_mask; + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + + if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { + dev_err(hba->dev, + "Timedout waiting for UIC response\n"); + + return -ETIMEDOUT; + } + + if (enabled_intr_status & UFSHCD_ERROR_MASK) { + dev_err(hba->dev, "Error in status:%08x\n", + enabled_intr_status); + + return -1; + } + } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); + + uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); + uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); + + debug("Sent successfully\n"); + + return 0; +} + +int ufshcd_dme_configure_adapt(struct ufs_hba *hba, + int agreed_gear, + int adapt_val) +{ + int ret; + + if (agreed_gear < UFS_HS_G4) + adapt_val = PA_NO_ADAPT; + + ret = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_TXHSADAPTTYPE), + adapt_val); + return ret; +} + +/** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, + u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + 
"dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? + UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; + uic_cmd.argument1 = attr_sel; + uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); + uic_cmd.argument3 = mib_val; + + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); + } while (ret && peer && --retries); + + if (ret) + dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, + UFS_UIC_COMMAND_RETRIES - retries); + + return ret; +} + +/** + * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET + * + */ +int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-get", + "dme-peer-get" + }; + const char *get = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? 
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; + uic_cmd.argument1 = attr_sel; + + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", + get, UIC_GET_ATTR_ID(attr_sel), ret); + } while (ret && peer && --retries); + + if (ret) + dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", + get, UIC_GET_ATTR_ID(attr_sel), + UFS_UIC_COMMAND_RETRIES - retries); + + if (mib_val && !ret) + *mib_val = uic_cmd.argument3; + + return ret; +} + +static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) +{ + u32 tx_lanes, i, err = 0; + + if (!peer) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + else + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + for (i = 0; i < tx_lanes; i++) { + unsigned int val = UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)); + if (!peer) + err = ufshcd_dme_set(hba, val, 0); + else + err = ufshcd_dme_peer_set(hba, val, 0); + if (err) { + dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n", + __func__, peer, i, err); + break; + } + } + + return err; +} + +static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) +{ + return ufshcd_disable_tx_lcc(hba, true); +} + +/** + * ufshcd_dme_link_startup - Notify Unipro to perform link startup + * + */ +static int ufshcd_dme_link_startup(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-link-startup: error code %d\n", ret); + return ret; +} + +int ufshcd_dme_enable(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_ENABLE; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-enable: error code %d\n", ret); + + return ret; +} + +int 
ufshcd_dme_reset(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_RESET; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-reset: error code %d\n", ret); + + return ret; +} + +/** + * ufshcd_disable_intr_aggr - Disables interrupt aggregation. + * + */ +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY + */ +static inline int ufshcd_get_lists_status(u32 reg) +{ + return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); +} + +/** + * ufshcd_enable_run_stop_reg - Enable run-stop registers, + * When run-stop registers are set to 1, it indicates the + * host controller that it can process the requests + */ +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) +{ + ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TASK_REQ_LIST_RUN_STOP); + ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); +} + +/** + * ufshcd_enable_intr - enable interrupts + */ +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 rw; + + if (hba->version == UFSHCI_VERSION_10) { + rw = set & INTERRUPT_MASK_RW_VER_10; + set = rw | ((set ^ intrs) & intrs); + } else { + set |= intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); + + hba->intr_mask = set; +} + +/** + * ufshcd_make_hba_operational - Make UFS controller operational + * + * To bring UFS host controller to operational state, + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base address + * 4. 
Configure run-stop-registers + * + */ +static int ufshcd_make_hba_operational(struct ufs_hba *hba) +{ + int err = 0; + u32 reg; + + /* Enable required interrupts */ + ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); + + /* Disable interrupt aggregation */ + ufshcd_disable_intr_aggr(hba); + + /* Configure UTRL and UTMRL base address registers */ + ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), + REG_UTP_TRANSFER_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), + REG_UTP_TRANSFER_REQ_LIST_BASE_H); + ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), + REG_UTP_TASK_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), + REG_UTP_TASK_REQ_LIST_BASE_H); + + /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. + */ + wmb(); + + /* + * UCRDY, UTMRLDY and UTRLRDY bits must be 1 + */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); + if (!(ufshcd_get_lists_status(reg))) { + ufshcd_enable_run_stop_reg(hba); + } else { + dev_err(hba->dev, + "Host controller not ready to process requests\n"); + err = -EIO; + goto out; + } + +out: + return err; +} + +/** + * ufshcd_link_startup - Initialize unipro link startup + */ +static int ufshcd_link_startup(struct ufs_hba *hba) +{ + int ret; + int retries = DME_LINKSTARTUP_RETRIES; + + do { + ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); + + ret = ufshcd_dme_link_startup(hba); + + /* check if device is detected by inter-connect layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. + */ + if (ret && ufshcd_hba_enable(hba)) + goto out; + } while (ret && retries--); + + if (ret) + /* failed to get the link up... 
retire */ + goto out; + + /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ + ufshcd_init_pwr_info(hba); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { + ret = ufshcd_disable_device_tx_lcc(hba); + if (ret) + goto out; + } + + /* Include any host controller configuration via UIC commands */ + ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; + + /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ + ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + ret = ufshcd_make_hba_operational(hba); +out: + if (ret) + dev_err(hba->dev, "link startup failed %d\n", ret); + + return ret; +} + +/** + * ufshcd_hba_stop - Send controller to reset state + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba) +{ + int err; + + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} + +/** + * ufshcd_is_hba_active - Get controller state + */ +static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) + ? false : true; +} + +/** + * ufshcd_hba_start - Start controller initialization sequence + */ +static inline void ufshcd_hba_start(struct ufs_hba *hba) +{ + ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); +} + +/** + * ufshcd_hba_enable - initialize the controller + */ +static int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int retry; + + if (!ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE); + + /* start controller initialization sequence */ + ufshcd_hba_start(hba); + + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. 
+	 * When the host controller completes initialization sequence
+	 * it sets the value of HCE bit to 1. The same HCE bit is read back
+	 * to check if the controller has completed initialization sequence.
+	 * So without this delay the value HCE = 1, set in the previous
+	 * instruction might be read back.
+	 * This delay can be changed based on the controller.
+	 */
+	mdelay(1);
+
+	/* wait for the host controller to complete initialization */
+	retry = 10;
+	while (ufshcd_is_hba_active(hba)) {
+		if (retry) {
+			retry--;
+		} else {
+			dev_err(hba->dev, "Controller enable failed\n");
+			return -EIO;
+		}
+		mdelay(5);
+	}
+
+	/* enable UIC related interrupts */
+	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);
+
+	return 0;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ *				memory offsets
+ *
+ * Points the single UTP Transfer Request Descriptor at the command
+ * descriptor and records the response/PRDT offsets inside it.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+	struct utp_transfer_req_desc *utrdlp;
+	dma_addr_t cmd_desc_dma_addr;
+	u16 response_offset;
+	u16 prdt_offset;
+
+	utrdlp = hba->utrdl;
+	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;
+
+	utrdlp->command_desc_base_addr_lo =
+		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
+	utrdlp->command_desc_base_addr_hi =
+		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));
+
+	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
+	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+	/* offsets/lengths are stored in units of 32-bit words, hence >> 2 */
+	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
+	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
+	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+
+	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
+	hba->ucd_rsp_ptr =
+		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
+	hba->ucd_prdt_ptr =
+		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ *
+ * Both descriptors are placed in DMA-able memory; the 1024-byte memalign
+ * provides the 1 KiB alignment the base-address registers require.
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+	/* Allocate one Transfer Request Descriptor
+	 * Should be aligned to 1k boundary.
+	 */
+	hba->utrdl = memalign(1024,
+			      ALIGN(sizeof(struct utp_transfer_req_desc),
+				    ARCH_DMA_MINALIGN));
+	if (!hba->utrdl) {
+		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate one Command Descriptor
+	 * Should be aligned to 1k boundary.
+	 */
+	hba->ucdl = memalign(1024,
+			     ALIGN(sizeof(struct utp_transfer_cmd_desc),
+				   ARCH_DMA_MINALIGN));
+	if (!hba->ucdl) {
+		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ *
+ * The set of valid interrupt bits depends on the UFSHCI version.
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+	u32 intr_mask = 0;
+
+	switch (hba->version) {
+	case UFSHCI_VERSION_10:
+		intr_mask = INTERRUPT_MASK_ALL_VER_10;
+		break;
+	case UFSHCI_VERSION_11:
+	case UFSHCI_VERSION_20:
+		intr_mask = INTERRUPT_MASK_ALL_VER_11;
+		break;
+	case UFSHCI_VERSION_21:
+	default:
+		intr_mask = INTERRUPT_MASK_ALL_VER_21;
+		break;
+	}
+
+	return intr_mask;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+	/* UPMCRS field: bits 10:8 of the controller status register */
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_cache_flush - Flush cache
+ *
+ * Flush cache in aligned address..address+size range.
+ */
+static void ufshcd_cache_flush(void *addr, unsigned long size)
+{
+	uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
+
+	flush_dcache_range(start_addr, end_addr);
+}
+
+/**
+ * ufshcd_cache_invalidate - Invalidate cache
+ *
+ * Invalidate cache in aligned address..address+size range.
+ */
+static void ufshcd_cache_invalidate(void *addr, unsigned long size)
+{
+	uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
+
+	invalidate_dcache_range(start_addr, end_addr);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * descriptor according to request
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+					u32 *upiu_flags,
+					enum dma_data_direction cmd_dir)
+{
+	struct utp_transfer_req_desc *req_desc = hba->utrdl;
+	u32 data_direction;
+	u32 dword_0;
+
+	if (cmd_dir == DMA_FROM_DEVICE) {
+		data_direction = UTP_DEVICE_TO_HOST;
+		*upiu_flags = UPIU_CMD_FLAGS_READ;
+	} else if (cmd_dir == DMA_TO_DEVICE) {
+		data_direction = UTP_HOST_TO_DEVICE;
+		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
+	} else {
+		data_direction = UTP_NO_DATA_TRANSFER;
+		*upiu_flags = UPIU_CMD_FLAGS_NONE;
+	}
+
+	/* data direction is a plain enum value and must be shifted into place */
+	dword_0 = (data_direction << UPIU_DATA_DIRECTION_OFFSET)
+		| (0x1 << UPIU_COMMAND_TYPE_OFFSET);
+
+	/* Enable Interrupt for command */
+	dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+	/* Transfer request descriptor header fields */
+	req_desc->header.dword_0 = cpu_to_le32(dword_0);
+	/* dword_1 is reserved, hence it is set to 0 */
+	req_desc->header.dword_1 = 0;
+	/*
+	 * assigning invalid value for command status. Controller
+	 * updates OCS on command completion, with the command
+	 * status
+	 */
+	req_desc->header.dword_2 =
+		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+	/* dword_3 is reserved, hence it is set to 0 */
+	req_desc->header.dword_3 = 0;
+
+	req_desc->prd_table_length = 0;
+
+	ufshcd_cache_flush(req_desc, sizeof(*req_desc));
+}
+
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+					      u32 upiu_flags)
+{
+	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+	struct ufs_query *query = &hba->dev_cmd.query;
+	u16 len = be16_to_cpu(query->request.upiu_req.length);
+
+	/* Query request header */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
+				  upiu_flags, 0, TASK_TAG);
+	ucd_req_ptr->header.dword_1 =
+		UPIU_HEADER_DWORD(0, query->request.query_func,
+				  0, 0);
+
+	/* Data segment length only need for WRITE_DESC */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		ucd_req_ptr->header.dword_2 =
+			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+	else
+		ucd_req_ptr->header.dword_2 = 0;
+
+	/* Copy the Query Request buffer as is */
+	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
+
+	/* Copy the Descriptor */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
+		/* descriptor payload immediately follows the request UPIU */
+		memcpy(ucd_req_ptr + 1, query->descriptor, len);
+		ufshcd_cache_flush(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
+	} else {
+		ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	}
+
+	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+	ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
+{
+	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+
+	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG);
+	/* clear rest of the fields of basic header */
+	ucd_req_ptr->header.dword_1 = 0;
+	ucd_req_ptr->header.dword_2 = 0;
+
+	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+	ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
+}
+
+/**
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
+ * for Device Management Purposes
+ */
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
+				   enum dev_cmd_type cmd_type)
+{
+	u32 upiu_flags;
+	int ret = 0;
+
+	hba->dev_cmd.type = cmd_type;
+
+	ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE);
+	switch (cmd_type) {
+	case DEV_CMD_TYPE_QUERY:
+		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
+		break;
+	case DEV_CMD_TYPE_NOP:
+		ufshcd_prepare_utp_nop_upiu(hba);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Ring the doorbell for @task_tag and poll the interrupt status register
+ * until the transfer completes, errors out, or QUERY_REQ_TIMEOUT elapses.
+ */
+static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+	unsigned long start;
+	u32 intr_status;
+	u32 enabled_intr_status;
+
+	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+
+	/* Make sure doorbell reg is updated before reading interrupt status */
+	wmb();
+
+	start = get_timer(0);
+	do {
+		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+		enabled_intr_status = intr_status & hba->intr_mask;
+		/* write-to-clear the status bits we just sampled */
+		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
+		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
+			dev_err(hba->dev,
+				"Timedout waiting for UTP response\n");
+
+			return -ETIMEDOUT;
+		}
+
+		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
+			dev_err(hba->dev, "Error in status:%08x\n",
+				enabled_intr_status);
+
+			return -1;
+		}
+	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));
+
+	return 0;
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ */
+static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	ufshcd_cache_invalidate(ucd_rsp_ptr, sizeof(*ucd_rsp_ptr));
+
+	/* transaction type lives in the most significant byte of dword_0 */
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ *
*/ +static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba) +{ + struct utp_transfer_req_desc *req_desc = hba->utrdl; + + ufshcd_cache_invalidate(req_desc, sizeof(*req_desc)); + + return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS; +} + +static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; +} + +static int ufshcd_check_query_response(struct ufs_hba *hba) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + return query_res->response; +} + +/** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + */ +static int ufshcd_copy_query_response(struct ufs_hba *hba) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + + /* Get the descriptor */ + if (hba->dev_cmd.query.descriptor && + hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)hba->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 resp_len; + u16 buf_len; + + /* data segment length */ + resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + buf_len = + be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length); + if (likely(buf_len >= resp_len)) { + memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); + } else { + dev_warn(hba->dev, + "%s: Response size is bigger than buffer\n", + __func__); + return -EINVAL; + } + } + + return 0; +} + +/** + * ufshcd_exec_dev_cmd - API for sending device management requests + */ +static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, + int timeout) +{ + int err; + int resp; + + err = ufshcd_comp_devman_upiu(hba, cmd_type); + if (err) + return err; + + err = ufshcd_send_command(hba, TASK_TAG); + if (err) + return err; + + 
err = ufshcd_get_tr_ocs(hba); + if (err) { + dev_err(hba->dev, "Error in OCS:%d\n", err); + return -EINVAL; + } + + resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); + switch (resp) { + case UPIU_TRANSACTION_NOP_IN: + break; + case UPIU_TRANSACTION_QUERY_RSP: + err = ufshcd_check_query_response(hba); + if (!err) + err = ufshcd_copy_query_response(hba); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + err = -EPERM; + dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", + __func__); + break; + default: + err = -EINVAL; + dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", + __func__, resp); + } + + return err; +} + +/** + * ufshcd_init_query() - init the query response and request parameters + */ +static inline void ufshcd_init_query(struct ufs_hba *hba, + struct ufs_query_req **request, + struct ufs_query_res **response, + enum query_opcode opcode, + u8 idn, u8 index, u8 selector) +{ + *request = &hba->dev_cmd.query.request; + *response = &hba->dev_cmd.query.response; + memset(*request, 0, sizeof(struct ufs_query_req)); + memset(*response, 0, sizeof(struct ufs_query_res)); + (*request)->upiu_req.opcode = opcode; + (*request)->upiu_req.idn = idn; + (*request)->upiu_req.index = index; + (*request)->upiu_req.selector = selector; +} + +/** + * ufshcd_query_flag() - API function for sending flag query requests + */ +static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, bool *flag_res) +{ + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err, index = 0, selector = 0; + int timeout = QUERY_REQ_TIMEOUT; + + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + + switch (opcode) { + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_FLAG: + 
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + if (!flag_res) { + /* No dummy reads */ + dev_err(hba->dev, "%s: Invalid argument for read request\n", + __func__); + err = -EINVAL; + goto out; + } + break; + default: + dev_err(hba->dev, + "%s: Expected query flag opcode but got = %d\n", + __func__, opcode); + err = -EINVAL; + goto out; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); + + if (err) { + dev_err(hba->dev, + "%s: Sending flag query for idn %d failed, err = %d\n", + __func__, idn, err); + goto out; + } + + if (flag_res) + *flag_res = (be32_to_cpu(response->upiu_res.value) & + MASK_QUERY_UPIU_FLAG_LOC) & 0x1; + +out: + return err; +} + +static int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, + enum flag_idn idn, bool *flag_res) +{ + int ret; + int retries; + + for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { + ret = ufshcd_query_flag(hba, opcode, idn, flag_res); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", + __func__, opcode, idn, ret, retries); + return ret; +} + +/** + * ufshcd_query_attr - API function for sending attribute requests + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request completes + * + * Return: 0 for success, non-zero in case of failure. 
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (!attr_val) {
+		dev_err(hba->dev,
+			"%s: attribute value required for opcode 0x%x\n",
+			__func__, opcode);
+		return -EINVAL;
+	}
+
+	ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector);
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+		/* attribute values travel big-endian on the wire */
+		request->upiu_req.value = cpu_to_be32(*attr_val);
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+		break;
+	default:
+		dev_err(hba->dev,
+			"%s: Expected query attr opcode but got = 0x%.2x\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev,
+			"%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+			__func__, opcode, idn, index, err);
+		goto out;
+	}
+
+	/* on a write this echoes back the value the device accepted */
+	*attr_val = be32_to_cpu(response->upiu_res.value);
+
+out:
+	return err;
+}
+
+/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Return: 0 for success, non-zero in case of failure.
+ */
+int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
+			    enum attr_idn idn, u8 index, u8 selector,
+			    u32 *attr_val)
+{
+	int ret = 0;
+	u32 retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val);
+		if (ret)
+			dev_dbg(hba->dev,
+				"%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
+			__func__, idn, ret, QUERY_REQ_RETRIES);
+	return ret;
+}
+
+/*
+ * Send a single descriptor read/write query. On success *buf_len is
+ * updated with the length the device actually returned.
+ */
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+				     enum query_opcode opcode,
+				     enum desc_idn idn, u8 index, u8 selector,
+				     u8 *desc_buf, int *buf_len)
+{
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (!desc_buf) {
+		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+			__func__, *buf_len);
+		err = -EINVAL;
+		goto out;
+	}
+
+	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+			  selector);
+	hba->dev_cmd.query.descriptor = desc_buf;
+	request->upiu_req.length = cpu_to_be16(*buf_len);
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_WRITE_DESC:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+		break;
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+		break;
+	default:
+		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+			__func__, opcode, idn, index, err);
+		goto out;
+	}
+
+	hba->dev_cmd.query.descriptor = NULL;
+	*buf_len = be16_to_cpu(response->upiu_res.length);
+
+out:
+	return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ */
+static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
+					 enum desc_idn idn, u8 index, u8 selector,
+					 u8 *desc_buf, int *buf_len)
+{
+	int err;
+	int retries;
+
+	/* -EINVAL means a bad request, so retrying would not help */
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+						selector, desc_buf, buf_len);
+		if (!err || err == -EINVAL)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
+				   int desc_index, int *desc_length)
+{
+	int ret;
+	u8 header[QUERY_DESC_HDR_SIZE];
+	int header_len = QUERY_DESC_HDR_SIZE;
+
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					    desc_id, desc_index, 0, header,
+					    &header_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
+			__func__, desc_id);
+		return ret;
+	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
+			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+			 desc_id);
+		ret = -EINVAL;
+	}
+
+	/* length is reported even on a type mismatch */
+	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+
+	return ret;
+}
+
+/*
+ * Probe each descriptor's length from the device, falling back to the
+ * spec default size when the device cannot be queried.
+ */
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+				      &hba->desc_size.dev_desc);
+	if (err)
+		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+				      &hba->desc_size.pwr_desc);
+	if (err)
+		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+				      &hba->desc_size.interc_desc);
+	if (err)
+		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+				      &hba->desc_size.conf_desc);
+	if (err)
+		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+				      &hba->desc_size.unit_desc);
+	if (err)
+		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+				      &hba->desc_size.geom_desc);
+	if (err)
+		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+				      &hba->desc_size.hlth_desc);
+	if (err)
+		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ *
+ */
+static int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+					int *desc_len)
+{
+	switch (desc_id) {
+	case QUERY_DESC_IDN_DEVICE:
+		*desc_len = hba->desc_size.dev_desc;
+		break;
+	case QUERY_DESC_IDN_POWER:
+		*desc_len = hba->desc_size.pwr_desc;
+		break;
+	case QUERY_DESC_IDN_GEOMETRY:
+		*desc_len = hba->desc_size.geom_desc;
+		break;
+	case QUERY_DESC_IDN_CONFIGURATION:
+		*desc_len = hba->desc_size.conf_desc;
+		break;
+	case QUERY_DESC_IDN_UNIT:
+		*desc_len = hba->desc_size.unit_desc;
+		break;
+	case QUERY_DESC_IDN_INTERCONNECT:
+		*desc_len = hba->desc_size.interc_desc;
+		break;
+	case QUERY_DESC_IDN_STRING:
+		*desc_len = QUERY_DESC_MAX_SIZE;
+		break;
+	case QUERY_DESC_IDN_HEALTH:
+		*desc_len = hba->desc_size.hlth_desc;
+		break;
+	case QUERY_DESC_IDN_RFU_0:
+	case QUERY_DESC_IDN_RFU_1:
+		*desc_len = 0;
+		break;
+	default:
+		*desc_len = 0;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ *
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
+				  int desc_index, u8 param_offset,
+				  u8 *param_read_buf, u8 param_size)
+{
+	int ret;
+	u8 *desc_buf;
+	int buff_len;
+	bool is_kmalloc = true;
+
+	/* Safety check */
+	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+		return -EINVAL;
+
+	/* Get the max length of descriptor from structure filled up at probe
+	 * time.
+	 */
+	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+	/* Sanity checks */
+	if (ret || !buff_len) {
+		dev_err(hba->dev, "%s: Failed to get full descriptor length\n",
+			__func__);
+		return ret;
+	}
+
+	/* Check whether we need temp memory */
+	if (param_offset != 0 || param_size < buff_len) {
+		desc_buf = kmalloc(buff_len, GFP_KERNEL);
+		if (!desc_buf)
+			return -ENOMEM;
+	} else {
+		/* caller's buffer can hold the whole descriptor directly */
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
+	}
+
+	/* Request for full descriptor */
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					    desc_id, desc_index, 0, desc_buf,
+					    &buff_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
+			__func__, desc_id, desc_index, param_offset, ret);
+		goto out;
+	}
+
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Check whether we will not copy more data than available */
+	if (is_kmalloc && param_size > buff_len)
+		param_size = buff_len;
+
+	if (is_kmalloc)
+		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+	if (is_kmalloc)
+		kfree(desc_buf);
+	return ret;
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(uint8_t *val)
+{
+	if (!val)
+		return;
+
+	if (*val < 0x20 || *val > 0x7e)
+		*val = ' ';
+}
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ * + */ +static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) +{ + unsigned long start = 0; + u8 status; + int ret; + + ret = ufshcd_send_uic_cmd(hba, cmd); + if (ret) { + dev_err(hba->dev, + "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", + cmd->command, cmd->argument3, ret); + + return ret; + } + + start = get_timer(0); + do { + status = ufshcd_get_upmcrs(hba); + if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { + dev_err(hba->dev, + "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", + cmd->command, status); + ret = (status != PWR_OK) ? status : -1; + break; + } + } while (status != PWR_LOCAL); + + return ret; +} + +/** + * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change + * using DME_SET primitives. + */ +static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_SET; + uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); + uic_cmd.argument3 = mode; + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + + return ret; +} + +static +void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba, + struct scsi_cmd *pccb, u32 upiu_flags) +{ + struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; + unsigned int cdb_len; + + /* command descriptor fields */ + ucd_req_ptr->header.dword_0 = + UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags, + pccb->lun, TASK_TAG); + ucd_req_ptr->header.dword_1 = + UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); + + /* Total EHS length and Data segment length will be zero */ + ucd_req_ptr->header.dword_2 = 0; + + ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen); + + cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE); + memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); + memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len); + + memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr)); + ufshcd_cache_flush(hba->ucd_rsp_ptr, 
sizeof(*hba->ucd_rsp_ptr)); +} + +static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry, + unsigned char *buf, ulong len) +{ + entry->size = cpu_to_le32(len) | GENMASK(1, 0); + entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf)); + entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf)); +} + +static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb) +{ + struct utp_transfer_req_desc *req_desc = hba->utrdl; + struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr; + ulong datalen = pccb->datalen; + int table_length; + u8 *buf; + int i; + + if (!datalen) { + req_desc->prd_table_length = 0; + ufshcd_cache_flush(req_desc, sizeof(*req_desc)); + return; + } + + table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY); + buf = pccb->pdata; + i = table_length; + while (--i) { + prepare_prdt_desc(&prd_table[table_length - i - 1], buf, + MAX_PRDT_ENTRY - 1); + buf += MAX_PRDT_ENTRY; + datalen -= MAX_PRDT_ENTRY; + } + + prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1); + + req_desc->prd_table_length = table_length; + ufshcd_cache_flush(prd_table, sizeof(*prd_table) * table_length); + ufshcd_cache_flush(req_desc, sizeof(*req_desc)); +} + +static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb) +{ + struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); + u32 upiu_flags; + int ocs, result = 0; + u8 scsi_status; + + ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir); + ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags); + prepare_prdt_table(hba, pccb); + + ufshcd_cache_flush(pccb->pdata, pccb->datalen); + + ufshcd_send_command(hba, TASK_TAG); + + ufshcd_cache_invalidate(pccb->pdata, pccb->datalen); + + ocs = ufshcd_get_tr_ocs(hba); + switch (ocs) { + case OCS_SUCCESS: + result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr); + + scsi_status = result & 
MASK_SCSI_STATUS; + if (scsi_status) + return -EINVAL; + + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + dev_err(hba->dev, + "Reject UPIU not fully implemented\n"); + return -EINVAL; + default: + dev_err(hba->dev, + "Unexpected request response code = %x\n", + result); + return -EINVAL; + } + break; + default: + dev_err(hba->dev, "OCS error from controller = %x\n", ocs); + return -EINVAL; + } + + return 0; +} + +static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, + int desc_index, u8 *buf, u32 size) +{ + return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); +} + +static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); +} + +/** + * ufshcd_read_string_desc - read string descriptor + * + */ +static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, + u8 *buf, u32 size, bool ascii) +{ + int err = 0; + + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf, + size); + + if (err) { + dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", + __func__, QUERY_REQ_RETRIES, err); + goto out; + } + + if (ascii) { + int desc_len; + int ascii_len; + int i; + u8 *buff_ascii; + + desc_len = buf[0]; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { + dev_err(hba->dev, "%s: buffer allocated size is too small\n", + __func__); + err = -ENOMEM; + goto out; + } + + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); + if (!buff_ascii) { + err = -ENOMEM; + goto out; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + utf16_to_utf8(buff_ascii, + (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ascii_len; i++) + ufshcd_remove_non_printable(&buff_ascii[i]); + + memset(buf + QUERY_DESC_HDR_SIZE, 0, + size - QUERY_DESC_HDR_SIZE); + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; + kfree(buff_ascii); + } +out: + return err; +} + +static int ufs_get_device_desc(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + int err; + size_t buff_len; + u8 model_index; + u8 *desc_buf; + + buff_len = max_t(size_t, hba->desc_size.dev_desc, + QUERY_DESC_MAX_SIZE + 1); + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + /* Zero-pad entire buffer for string termination. 
*/ + memset(desc_buf, 0, buff_len); + + err = ufshcd_read_string_desc(hba, model_index, desc_buf, + QUERY_DESC_MAX_SIZE, true/*ASCII*/); + if (err) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; + strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE), + min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], + MAX_MODEL_LEN)); + + /* Null terminate the model string */ + dev_desc->model[MAX_MODEL_LEN] = '\0'; + +out: + kfree(desc_buf); + return err; +} + +struct ufs_ref_clk { + unsigned long freq_hz; + enum ufs_ref_clk_freq val; +}; + +static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { + {19200000, REF_CLK_FREQ_19_2_MHZ}, + {26000000, REF_CLK_FREQ_26_MHZ}, + {38400000, REF_CLK_FREQ_38_4_MHZ}, + {52000000, REF_CLK_FREQ_52_MHZ}, + {0, REF_CLK_FREQ_INVAL}, +}; + +static enum ufs_ref_clk_freq +ufs_get_bref_clk_from_hz(unsigned long freq) +{ + int i; + + for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) + if (ufs_ref_clk_freqs[i].freq_hz == freq) + return ufs_ref_clk_freqs[i].val; + + return REF_CLK_FREQ_INVAL; +} + +enum ufs_ref_clk_freq ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) +{ + unsigned long freq; + + freq = clk_get_rate(refclk); + return ufs_get_bref_clk_from_hz(freq); +} + +static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) +{ + int err; + struct clk *ref_clk; + u32 host_ref_clk_freq; + u32 dev_ref_clk_freq; + + /* get ref_clk */ + ref_clk = devm_clk_get(hba->dev, "ref_clk"); + if (IS_ERR((const void *)ref_clk)) { + err = PTR_ERR(ref_clk); + goto out; + } + + host_ref_clk_freq = ufshcd_parse_dev_ref_clk_freq(hba, ref_clk); + if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) + dev_err(hba->dev, + "invalid ref_clk setting = %ld\n", clk_get_rate(ref_clk)); + + if (host_ref_clk_freq == REF_CLK_FREQ_INVAL) + goto out; + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, 
&dev_ref_clk_freq); + + if (err) { + dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", err); + goto out; + } + + if (dev_ref_clk_freq == host_ref_clk_freq) + goto out; /* nothing to update */ + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &host_ref_clk_freq); + + if (err) { + dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", + ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); + goto out; + } + + dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", + ufs_ref_clk_freqs[host_ref_clk_freq].freq_hz); + +out: + return err; +} + +/** + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + */ +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { + pwr_info->pwr_tx = FASTAUTO_MODE; + pwr_info->pwr_rx = FASTAUTO_MODE; + } else { + pwr_info->pwr_tx = FAST_MODE; + pwr_info->pwr_rx = FAST_MODE; + } + pwr_info->hs_rate = PA_HS_MODE_B; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", + __func__, pwr_info->lane_rx, pwr_info->lane_tx); + return -EINVAL; + } + + /* + * First, get the maximum gears of HS speed. + * If a zero value, it means there is no HSGEAR capability. + * Then, get the maximum gears of PWM speed. 
+ */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOW_MODE; + } + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOW_MODE; + } + + hba->max_pwr_info.is_valid = true; + return ufshcd_ops_get_max_pwr_mode(hba, &hba->max_pwr_info); +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; + } + + /* + * Configure attributes for power mode change with below. 
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, + * - PA_HSSERIES + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); + + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); + + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | + pwr_mode->pwr_tx); + + if (ret) { + dev_err(hba->dev, + "%s: power mode change failed %d\n", __func__, ret); + + return ret; + } + + /* Copy new Power Mode to power info */ + memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); + + return ret; +} + +/** + * ufshcd_verify_dev_init() - Verify device initialization + * + */ +static int ufshcd_verify_dev_init(struct ufs_hba *hba) +{ + int retries; + int err; + + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, + NOP_OUT_TIMEOUT); + if (!err || err == -ETIMEDOUT) + break; + + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + + if (err) + dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); + + return err; +} + +/** + * ufshcd_complete_dev_init() - checks device readiness + */ +static int ufshcd_complete_dev_init(struct 
ufs_hba *hba) +{ + int i; + int err; + bool flag_res = 1; + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, NULL); + if (err) { + dev_err(hba->dev, + "%s setting fDeviceInit flag failed with error %d\n", + __func__, err); + goto out; + } + + /* poll for max. 1000 iterations for fDeviceInit flag to clear */ + for (i = 0; i < 1000 && !err && flag_res; i++) + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, + &flag_res); + + if (err) + dev_err(hba->dev, + "%s reading fDeviceInit flag failed with error %d\n", + __func__, err); + else if (flag_res) + dev_err(hba->dev, + "%s fDeviceInit was not cleared by the device\n", + __func__); + +out: + return err; +} + +static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +{ + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; +} + +static int ufs_start(struct ufs_hba *hba) +{ + struct ufs_dev_desc card = {0}; + int ret; + + ret = ufshcd_link_startup(hba); + if (ret) + return ret; + + ret = ufshcd_verify_dev_init(hba); + if (ret) + return ret; + + ret = ufshcd_complete_dev_init(hba); + if (ret) + return ret; + + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + + ret = ufs_get_device_desc(hba, &card); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", + __func__, ret); + + return ret; + } + + ufshcd_set_dev_ref_clk(hba); + + if (ufshcd_get_max_pwr_mode(hba)) { + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); + } else { + ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + + return ret; + } + + debug("UFS Device %s is up!\n", hba->dev->name); + ufshcd_print_pwr_info(hba); + } + + return 0; +} + +int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) +{ + struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); + struct scsi_plat *scsi_plat; + struct udevice *scsi_dev; + void __iomem *mmio_base; + int err; + + device_find_first_child(ufs_dev, &scsi_dev); + if (!scsi_dev) + return -ENODEV; + + scsi_plat = dev_get_uclass_plat(scsi_dev); + scsi_plat->max_id = UFSHCD_MAX_ID; + scsi_plat->max_lun = UFS_MAX_LUNS; + scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; + + hba->dev = ufs_dev; + hba->ops = hba_ops; + + if (device_is_on_pci_bus(ufs_dev)) { + mmio_base = dm_pci_map_bar(ufs_dev, PCI_BASE_ADDRESS_0, 0, 0, + PCI_REGION_TYPE, PCI_REGION_MEM); + } else { + mmio_base = dev_read_addr_ptr(ufs_dev); + } + hba->mmio_base = mmio_base; + + /* Set descriptor lengths to specification defaults */ + ufshcd_def_desc_sizes(hba); + + ufshcd_ops_init(hba); + + /* Read capabilities registers */ + hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) + hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; + + /* Get UFS version supported by the controller */ + hba->version = ufshcd_get_ufs_version(hba); + if (hba->version != UFSHCI_VERSION_10 && + hba->version != UFSHCI_VERSION_11 && + hba->version != UFSHCI_VERSION_20 && + hba->version != UFSHCI_VERSION_21 && + hba->version != UFSHCI_VERSION_30 && + hba->version != UFSHCI_VERSION_31 && + hba->version != UFSHCI_VERSION_40) + dev_err(hba->dev, "invalid UFS 
version 0x%x\n", + hba->version); + + /* Get Interrupt bit mask per version */ + hba->intr_mask = ufshcd_get_intr_mask(hba); + + /* Allocate memory for host memory space */ + err = ufshcd_memory_alloc(hba); + if (err) { + dev_err(hba->dev, "Memory allocation failed\n"); + return err; + } + + /* Configure Local data structures */ + ufshcd_host_memory_configure(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + + mb(); /* flush previous writes */ + + /* Reset the attached device */ + ufshcd_device_reset(hba); + + err = ufshcd_hba_enable(hba); + if (err) { + dev_err(hba->dev, "Host controller enable failed\n"); + return err; + } + + err = ufs_start(hba); + if (err) + return err; + + return 0; +} + +#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) +static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state) +{ +#ifdef CONFIG_PHYS_64BIT + struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); + uintptr_t ubuf = (uintptr_t)state->user_buffer; + size_t len = state->len_aligned; + + /* Check if below 32bit boundary */ + if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) && + ((ubuf >> 32) || (ubuf + len) >> 32)) { + dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n", + ubuf, ubuf + len); + return 0; + } +#endif + return 1; +} +#endif /* CONFIG_BOUNCE_BUFFER */ + +static struct scsi_ops ufs_ops = { + .exec = ufs_scsi_exec, +#if IS_ENABLED(CONFIG_BOUNCE_BUFFER) + .buffer_aligned = ufs_scsi_buffer_aligned, +#endif /* CONFIG_BOUNCE_BUFFER */ +}; + +int ufs_probe_dev(int index) +{ + struct udevice *dev; + + return uclass_get_device(UCLASS_UFS, index, &dev); +} + +int ufs_probe(void) +{ + struct udevice *dev; + int ret, i; + + for (i = 0;; i++) { + ret = 
uclass_get_device(UCLASS_UFS, i, &dev); + if (ret == -ENODEV) + break; + } + + return 0; +} + +U_BOOT_DRIVER(ufs_scsi) = { + .id = UCLASS_SCSI, + .name = "ufs_scsi", + .ops = &ufs_ops, +}; + +static int ufs_post_bind(struct udevice *dev) +{ + return device_bind_driver(dev, "ufs_scsi", "ufs_scsi", NULL); +} + +UCLASS_DRIVER(ufs) = { + .id = UCLASS_UFS, + .name = "ufs", + .post_bind = ufs_post_bind, + .per_device_auto = sizeof(struct ufs_hba), +}; -- cgit v1.2.3 From 4ced7e726490911de9a632ea13a55b6776c2e210 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 28 Oct 2025 15:22:26 +0100 Subject: ufs: renesas: Update Kconfig entry help text The current Renesas UFS driver contains initialization code that is specific to R-Car S4 R8A779F0. The upcoming R-Car X5H initialization code is different and contained in a separate driver. Update the Kconfig entry help text for the current driver to help discern it from the X5H driver. No functional change. Reviewed-by: Neil Armstrong Signed-off-by: Marek Vasut Link: https://patch.msgid.link/20251028142335.18125-6-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index 6b980bce1e9..3f8d8b50a32 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -71,12 +71,12 @@ config UFS_PCI If unsure, say N. config UFS_RENESAS - bool "Renesas specific hooks to UFS controller platform driver" + bool "Renesas R-Car S4 UFS Controller support" depends on UFS select BOUNCE_BUFFER help - This selects the Renesas specific additions to UFSHCD platform driver. - UFS host on Renesas needs some vendor specific configuration before - accessing the hardware. + This selects the Renesas S4 specific additions to UFSHCD + platform driver. UFS host on Renesas needs some vendor + specific configuration before accessing the hardware. 
endmenu -- cgit v1.2.3 From 3351fe7ecc1ac717c23e9ad2c16e1fe4a7b84fd2 Mon Sep 17 00:00:00 2001 From: Tuyen Dang Date: Tue, 28 Oct 2025 15:22:27 +0100 Subject: ufs: Add UFS driver for Renesas R-Car X5H Add UFS driver for UFS controller present on Renesas R-Car X5H R8A78000. The controller uses different initialization code compared to previous generation UFS controller present in Renesas R-Car S4 R8A779F0, and the majority of the driver is the initialization, hence a new driver. [Marek: Clean driver up, add SCMI reset handling, use read_poll_timeout(), pass error values out of ufs_renesas_pre_init(), change the compatible string to "renesas,r8a78000-ufs" to align with previous generation "renesas,r8a779f0-ufs"] Signed-off-by: Yoshihiro Shimoda Signed-off-by: Tuyen Dang Signed-off-by: Marek Vasut Reviewed-by: Neil Armstrong Link: https://patch.msgid.link/20251028142335.18125-7-marek.vasut+renesas@mailbox.org Signed-off-by: Neil Armstrong --- drivers/ufs/Kconfig | 9 ++ drivers/ufs/Makefile | 1 + drivers/ufs/ufs-renesas-rcar-gen5.c | 268 ++++++++++++++++++++++++++++++++++++ 3 files changed, 278 insertions(+) create mode 100644 drivers/ufs/ufs-renesas-rcar-gen5.c diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index 3f8d8b50a32..68693b7d226 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -79,4 +79,13 @@ config UFS_RENESAS platform driver. UFS host on Renesas needs some vendor specific configuration before accessing the hardware. +config UFS_RENESAS_GEN5 + bool "Renesas R-Car X5H UFS Controller support" + depends on UFS + select BOUNCE_BUFFER + help + This selects the Renesas X5H specific additions to UFSHCD + platform driver. UFS host on Renesas needs some vendor + specific configuration before accessing the hardware. 
+ endmenu diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile index 4915b12e623..15329b9795a 100644 --- a/drivers/ufs/Makefile +++ b/drivers/ufs/Makefile @@ -11,4 +11,5 @@ obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o obj-$(CONFIG_UFS_PCI) += ufs-pci.o obj-$(CONFIG_UFS_RENESAS) += ufs-renesas.o +obj-$(CONFIG_UFS_RENESAS_GEN5) += ufs-renesas-rcar-gen5.o obj-$(CONFIG_ROCKCHIP_UFS) += ufs-rockchip.o diff --git a/drivers/ufs/ufs-renesas-rcar-gen5.c b/drivers/ufs/ufs-renesas-rcar-gen5.c new file mode 100644 index 00000000000..3c66022579f --- /dev/null +++ b/drivers/ufs/ufs-renesas-rcar-gen5.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Renesas UFS host controller driver + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ufs.h" + +#define UFS_RENESAS_TIMEOUT_US 100000 + +struct ufs_renesas_priv { + struct clk_bulk clks; + struct reset_ctl_bulk resets; + fdt_addr_t phy_base; + /* The hardware needs initialization once */ + bool initialized; +}; + +static void ufs_dme_command(struct ufs_hba *hba, u32 cmd, + u32 arg1, u32 arg2, u32 arg3) +{ + ufshcd_writel(hba, arg1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, arg2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, arg3, REG_UIC_COMMAND_ARG_3); + ufshcd_writel(hba, cmd, REG_UIC_COMMAND); +} + +static int ufs_renesas_pre_init(struct ufs_hba *hba) +{ + struct ufs_renesas_priv *priv = dev_get_priv(hba->dev); + u32 val32; + u16 val16; + int ret; + + writew(0x0001, priv->phy_base + 0x20000); + writew(0x005c, priv->phy_base + 0x20212); + writew(0x005c, priv->phy_base + 0x20214); + writew(0x005c, priv->phy_base + 0x20216); + writew(0x005c, priv->phy_base + 0x20218); + writew(0x036a, priv->phy_base + 0x201d0); + writew(0x0102, priv->phy_base + 0x201d2); + writew(0x001f, priv->phy_base + 0x20082); + writew(0x000b, priv->phy_base + 0x20084); + 
writew(0x0126, priv->phy_base + 0x201d2); + writew(0x01dc, priv->phy_base + 0x20214); + writew(0x01dc, priv->phy_base + 0x20218); + writew(0x0000, priv->phy_base + 0x201cc); + writew(0x0200, priv->phy_base + 0x201ce); + writew(0x0000, priv->phy_base + 0x20212); + writew(0x0000, priv->phy_base + 0x20216); + + ret = readw_poll_timeout(priv->phy_base + 0x201ec, val16, + !(val16 & BIT(12)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + ret = readw_poll_timeout(priv->phy_base + 0x201e4, val16, + !(val16 & BIT(12)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + ret = readw_poll_timeout(priv->phy_base + 0x201f0, val16, + !(val16 & BIT(12)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + writew(0x0000, priv->phy_base + 0x20000); + + ufshcd_writel(hba, BIT(0), REG_CONTROLLER_ENABLE); + + ret = read_poll_timeout(ufshcd_readl, val32, (val32 & BIT(0)), + 1, UFS_RENESAS_TIMEOUT_US, + hba, REG_CONTROLLER_ENABLE); + if (ret) + return ret; + + ret = read_poll_timeout(ufshcd_readl, val32, (val32 & BIT(3)), + 1, UFS_RENESAS_TIMEOUT_US, + hba, REG_CONTROLLER_STATUS); + if (ret) + return ret; + + /* Skip IE because we cannot handle interrupts here */ + ufs_dme_command(hba, 0x00000002, 0x81010000, 0x00000000, 0x00000005); + ufs_dme_command(hba, 0x00000002, 0x81150000, 0x00000000, 0x00000001); + ufs_dme_command(hba, 0x00000002, 0x81180000, 0x00000000, 0x00000001); + ufs_dme_command(hba, 0x00000002, 0x80090000, 0x00000000, 0x00000000); + ufs_dme_command(hba, 0x00000002, 0x800a0000, 0x00000000, 0x000000c8); + ufs_dme_command(hba, 0x00000002, 0x80090001, 0x00000000, 0x00000000); + ufs_dme_command(hba, 0x00000002, 0x800a0001, 0x00000000, 0x000000c8); + ufs_dme_command(hba, 0x00000002, 0x800a0004, 0x00000000, 0x00000000); + ufs_dme_command(hba, 0x00000002, 0x800b0004, 0x00000000, 0x00000064); + ufs_dme_command(hba, 0x00000002, 0x800a0005, 0x00000000, 0x00000000); + ufs_dme_command(hba, 0x00000002, 0x800b0005, 0x00000000, 0x00000064); + ufs_dme_command(hba, 0x00000002, 
0xd0850000, 0x00000000, 0x00000001); + + writew(0x0001, priv->phy_base + 0x20000); + + clrbits_le16(priv->phy_base + 0x20022, BIT(0)); + + ret = readw_poll_timeout(priv->phy_base + (0x00198 << 1), val16, + (val16 & BIT(0)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + writew(0x0368, priv->phy_base + 0x201d0); + + ret = readw_poll_timeout(priv->phy_base + 0x201e4, val16, + !(val16 & BIT(11)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + ret = readw_poll_timeout(priv->phy_base + 0x201e8, val16, + !(val16 & BIT(11)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + ret = readw_poll_timeout(priv->phy_base + 0x201ec, val16, + !(val16 & BIT(11)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + ret = readw_poll_timeout(priv->phy_base + 0x201f0, val16, + !(val16 & BIT(11)), UFS_RENESAS_TIMEOUT_US); + if (ret) + return ret; + + priv->initialized = true; + + return 0; +} + +static int ufs_renesas_init(struct ufs_hba *hba) +{ + hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO | + UFSHCD_QUIRK_BROKEN_LCC; + + return 0; +} + +static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_renesas_priv *priv = dev_get_priv(hba->dev); + int ret; + + if (priv->initialized) + return 0; + + if (status == PRE_CHANGE) { + ret = ufs_renesas_pre_init(hba); + if (ret) + return ret; + } + + priv->initialized = true; + + return 0; +} + +static int ufs_renesas_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE) + return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + + return 0; +} + +static int ufs_renesas_get_max_pwr_mode(struct ufs_hba *hba, + struct ufs_pwr_mode_info *max_pwr_info) +{ + max_pwr_info->info.gear_rx = UFS_HS_G5; + max_pwr_info->info.gear_tx = UFS_HS_G5; + max_pwr_info->info.pwr_tx = FASTAUTO_MODE; + max_pwr_info->info.pwr_rx = FASTAUTO_MODE; + max_pwr_info->info.hs_rate = PA_HS_MODE_A; + 
+ max_pwr_info->info.lane_rx = 1; + max_pwr_info->info.lane_tx = 1; + + dev_info(hba->dev, "Max HS Gear: %d\n", max_pwr_info->info.gear_rx); + + return 0; +} + +static struct ufs_hba_ops ufs_renesas_vops = { + .init = ufs_renesas_init, + .hce_enable_notify = ufs_renesas_hce_enable_notify, + .link_startup_notify = ufs_renesas_link_startup_notify, + .get_max_pwr_mode = ufs_renesas_get_max_pwr_mode, +}; + +static int ufs_renesas_pltfm_probe(struct udevice *dev) +{ + struct ufs_renesas_priv *priv = dev_get_priv(dev); + int ret; + + priv->phy_base = dev_read_addr_name(dev, "phy"); + if (priv->phy_base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = reset_get_bulk(dev, &priv->resets); + if (ret < 0) + return ret; + + ret = clk_get_bulk(dev, &priv->clks); + if (ret < 0) + goto err_clk_get; + + ret = clk_enable_bulk(&priv->clks); + if (ret) + goto err_clk_enable; + + reset_assert_bulk(&priv->resets); + reset_deassert_bulk(&priv->resets); + + ret = ufshcd_probe(dev, &ufs_renesas_vops); + if (ret) { + dev_err(dev, "ufshcd_probe() failed %d\n", ret); + goto err_ufshcd_probe; + } + + return 0; + +err_ufshcd_probe: + reset_assert_bulk(&priv->resets); + clk_disable_bulk(&priv->clks); +err_clk_enable: + clk_release_bulk(&priv->clks); +err_clk_get: + reset_release_bulk(&priv->resets); + return ret; +} + +static int ufs_renesas_pltfm_remove(struct udevice *dev) +{ + struct ufs_renesas_priv *priv = dev_get_priv(dev); + + reset_release_bulk(&priv->resets); + clk_disable_bulk(&priv->clks); + clk_release_bulk(&priv->clks); + + return 0; +} + +static const struct udevice_id ufs_renesas_pltfm_ids[] = { + { .compatible = "renesas,r8a78000-ufs" }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(ufs_renesas) = { + .name = "ufs-renesas-gen5", + .id = UCLASS_UFS, + .of_match = ufs_renesas_pltfm_ids, + .probe = ufs_renesas_pltfm_probe, + .remove = ufs_renesas_pltfm_remove, + .priv_auto = sizeof(struct ufs_renesas_priv), +}; -- cgit v1.2.3