PCI changes for the v4.9 merge window:

Altera host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Remove redundant platform_get_resource() return value check (Bjorn Helgaas)
     Rename altera_pcie_valid_config() to altera_pcie_valid_device() (Bjorn Helgaas)
     Simplify TLP_CFG_DW0 usage (Bjorn Helgaas)
     Simplify TLP_CFG_DW1 usage (Bjorn Helgaas)
 
   APM X-Gene host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Pass struct xgene_pcie_port to setup functions (Bjorn Helgaas)
     Add register accessors (Bjorn Helgaas)
 
   Axis ARTPEC-6 host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Add register accessors (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Remove unnecessary artpec6_pcie_link_up() (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Add resource name comments (Bjorn Helgaas)
 
   Broadcom iProc host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Set drvdata at end of probe function (Bjorn Helgaas)
     Validate CSR base in BCMA setup code (Bjorn Helgaas)
     Remove redundant null pointer checking (Bjorn Helgaas)
     Hard-code PCIe capability offset instead of searching (Bjorn Helgaas)
 
   Freescale i.MX6 host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove redundant of_node pointer (Bjorn Helgaas)
     Remove unused struct imx6_pcie.mem_base (Bjorn Helgaas)
     Pass struct imx6_pcie to PHY accessors (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Reorder struct imx6_pcie (Bjorn Helgaas)
     Remove unused return values (Bjorn Helgaas)
 
   Freescale Layerscape host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Remove redundant struct ls_pcie.dbi (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Move struct pcie_port setup to probe function (Bjorn Helgaas)
     Remove unused ls_add_pcie_port() platform_device arg (Bjorn Helgaas)
     Reorder struct ls_pcie (Bjorn Helgaas)
 
   HiSilicon host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Name private struct pointer "hisi_pcie" consistently (Bjorn Helgaas)
     Remove redundant struct hisi_pcie.reg_base (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Include register block base in PCIE_SYS_STATE4 address (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Reorder struct hisi_pcie (Bjorn Helgaas)
 
   Marvell Aardvark host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     MAINTAINERS: Add DT binding to the Aardvark PCIe driver maintainer (Thomas Petazzoni)
 
   Marvell Armada host bridge driver
     Remove unused platform data (Bjorn Helgaas)
     Add local base pointer (Bjorn Helgaas)
     Remove redundant struct armada8k_pcie.base (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Reorder struct armada8k_pcie (Bjorn Helgaas)
     MAINTAINERS: Add maintainer for the PCIe Marvell Armada 8K driver (Thomas Petazzoni)
 
   Marvell MVEBU host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Use existing of_node pointer (Bjorn Helgaas)
 
   NVIDIA Tegra host bridge driver
     Fix argument order in tegra_pcie_phy_disable() (Bjorn Helgaas)
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
 
   Qualcomm host bridge driver
     Remove unused platform data (Bjorn Helgaas)
     Remove redundant struct qcom_pcie.dbi (Bjorn Helgaas)
     Remove redundant struct qcom_pcie.dev (Bjorn Helgaas)
     Reorder struct qcom_pcie (Bjorn Helgaas)
 
   Renesas R-Car host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Remove unused rcar_pcie_get_resources() platform_device arg (Bjorn Helgaas)
     Remove DRV_NAME macro (Bjorn Helgaas)
     rcar-gen2: Add local struct device pointers (Bjorn Helgaas)
 
   Rockchip host bridge driver
     Remove unused platform data (Bjorn Helgaas)
     Indent "if" statement body (Dan Carpenter)
 
   Samsung Exynos host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Uninline register accessors (Bjorn Helgaas)
     Name private struct pointer "exynos_pcie" consistently (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Reorder struct exynos_pcie (Bjorn Helgaas)
 
   STMicroelectronics SPEAr13xx host bridge driver
     Remove unused constants (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Reorder struct spear13xx_pcie (Bjorn Helgaas)
     Clean up struct device usage (Bjorn Helgaas)
 
   Synopsys DesignWare host bridge driver
     Rename dw_pcie_valid_config() to dw_pcie_valid_device() (Bjorn Helgaas)
     Simplify pcie_host_ops.readl_rc() and .writel_rc() interfaces (Bjorn Helgaas)
     Swap order of dw_pcie_writel_rc() reg/val arguments (Bjorn Helgaas)
     Export dw_pcie_readl_rc(), dw_pcie_writel_rc() (Bjorn Helgaas)
     Uninline register accessors (Bjorn Helgaas)
     Simplify dw_pcie_readl_unroll(), dw_pcie_writel_unroll() (Kishon Vijay Abraham I)
     Swap order of dw_pcie_writel_unroll() reg/val arguments (Bjorn Helgaas)
     designware-plat: Remove redundant dw_plat_pcie.mem_base (Bjorn Helgaas)
     designware-plat: Add local struct device pointers (Bjorn Helgaas)
     designware-plat: Remove unused platform data (Bjorn Helgaas)
 
   TI DRA7xx host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove redundant struct device pointer from dra7xx_pcie (Bjorn Helgaas)
     Set drvdata at end of probe function (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Pass device-specific struct to internal functions (Bjorn Helgaas)
     Move struct pcie_port setup to probe function (Bjorn Helgaas)
     Reorder struct dra7xx_pcie (Bjorn Helgaas)
 
   TI Keystone host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Use generic DesignWare accessors (Bjorn Helgaas)
     Pass keystone_pcie, not address, to IRQ functions (Bjorn Helgaas)
     Pass keystone_pcie, not va_app_base, to DBI functions (Bjorn Helgaas)
     Add app register accessors (Bjorn Helgaas)
     Reorder struct keystone_pcie (Bjorn Helgaas)
 
   Xilinx AXI host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
     Remove unused xilinx_pcie_assign_msi() argument (Bjorn Helgaas)
 
   Xilinx NWL host bridge driver
     Add local struct device pointers (Bjorn Helgaas)
     Remove unused platform data (Bjorn Helgaas)
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJX/5JSAAoJEFmIoMA60/r8DgwQAL3rqok3OMp5mOFPTosLiGA5
 Pibrke/01imrOGT+vxo07K7AZhTkiOoVDDdwrYSOdTELc0bk8cjZUtjjzwQ0p1KP
 Xpq8mbX9EIZKrVhRYAFvtBMehw51l4D8RENF5GnyeJ04MDWzV7re1zsHuWAkUGC5
 9BP6DE0RGjddnyi8gRe39ZRVQofvZT9WgLb5LgnjpTgCpAcSBQBUlyynRlKcqmcY
 S2noNrIDb9KElpbw45egy11p79z6A6h9n6pfrzKff07Y3N1DFK2XeFoyoEHUnABM
 sicjjbR0W/udWm2eONLB8s1tYTJKL3zvNzWI1ywC3ESfmmt2iZ22pu0yxnjmdCRa
 vbBpQp/BfXH0yBd2Onl06EvyIBMEO35vBgiRUW/Su76JTh0jAHQh1k0lvK3l85qK
 mcpWQ5AwGquwywr3IhcAtuGI8SbrTZ2wElOJhRukwbZYF/RDKMKn71dSqd9ULTKl
 0w2hpeJW8Csl+2UwJQTtH9Pld3FXslBa1zM9WqUskxgbsch+ARb1zRwzXJj50E1A
 ekbWb/W7oxDPHKkK+hh5jc0zb/sEBdoBL0Vz1QuP7Al50b1FOS3tX4FdnfVZntTL
 vhDdo+0tCmbVAI6bRLPi0Q2Uyw6fl0f0preDXNs9siNDUjqRSqpGmY/qYXX97MRk
 /pFdD9BJw9uUONm4AQOf
 =Zq8E
 -----END PGP SIGNATURE-----

Merge tag 'pci-v4.9-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

PCI changes for the v4.9 merge window:
 "Here are some more changes I'd like to have in v4.9.  There's one
  small Tegra bug fix in the PHY poweroff path, which is only used in
  failure paths.

  The rest is all strictly cleanup that should make host bridge drivers
  more readable, but shouldn't actually change any behavior.

  Summary:

   - use local struct device pointers in many host bridge drivers for
     clarity (a minimal before/after sketch follows below)

   - remove unused platform data

   - use generic DesignWare accessors (see the signature sketch after
     the commit list)

   - misc cleanups: remove redundant structure entries and re-order
     structure members to put common generic fields first, etc"
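
The "local struct device pointer" change, which accounts for the bulk of
the commits below, is purely mechanical: functions that dereference
&pdev->dev (or pp->dev) over and over cache it once in a local variable.
A minimal before/after sketch — the foo_pcie names are hypothetical, but
the pattern matches e.g. advk_pcie_probe() in the aardvark diff below:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/platform_device.h>

struct foo_pcie {
	void __iomem *base;	/* placeholder driver state */
};

/* Before: every call spells out &pdev->dev inline. */
static int foo_pcie_probe_before(struct platform_device *pdev)
{
	struct foo_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

/* After: cache the pointer once; later uses are shorter and clearer. */
static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_pcie *pcie;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;
	dev_info(dev, "probed\n");
	return 0;
}

As the summary says, no behavior is meant to change; the rewrite only
shortens repeated dereferences.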

* tag 'pci-v4.9-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (108 commits)
  MAINTAINERS: Add maintainer for the PCIe Marvell Armada 8K driver
  MAINTAINERS: Add DT binding to the Aardvark PCIe driver maintainer
  PCI: rockchip: Indent "if" statement body
  PCI: hisi: Reorder struct hisi_pcie
  PCI: hisi: Pass device-specific struct to internal functions
  PCI: hisi: Include register block base in PCIE_SYS_STATE4 address
  PCI: dra7xx: Reorder struct dra7xx_pcie
  PCI: xilinx-nwl: Remove unused platform data
  PCI: xilinx-nwl: Add local struct device pointers
  PCI: xilinx: Remove unused xilinx_pcie_assign_msi() argument
  PCI: xilinx: Remove unused platform data
  PCI: xilinx: Add local struct device pointers
  PCI: xgene: Add register accessors
  PCI: xgene: Pass struct xgene_pcie_port to setup functions
  PCI: xgene: Remove unused platform data
  PCI: tegra: Remove unused platform data
  PCI: tegra: Add local struct device pointers
  PCI: tegra: Fix argument order in tegra_pcie_phy_disable()
  PCI: rockchip: Remove unused platform data
  PCI: rcar-gen2: Add local struct device pointers
  ...
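
For the "generic DesignWare accessors" conversions (dra7xx, imx6,
keystone, hisi, armada8k, artpec6), the shared accessors now take the
register offset before the value — the reg/val argument swap called out
in the DesignWare section above. A condensed sketch of the converted
dra7xx suspend path from the diff below; dra7xx_pcie_suspend_sketch is a
hypothetical stand-in for the real dra7xx_pcie_suspend():

#include <linux/pci_regs.h>
#include <linux/types.h>

struct pcie_port;	/* DesignWare core type, from pcie-designware.h */

/* Post-series signatures, exported by the DesignWare core: */
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);

static int dra7xx_pcie_suspend_sketch(struct pcie_port *pp)
{
	u32 val;

	/* clear MSE; was: val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND); */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	/* was: dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val); */
	dw_pcie_writel_rc(pp, PCI_COMMAND, val);

	return 0;
}

This lets each driver delete its private readl_rc/writel_rc wrappers, as
the dra7xx hunk removing dra7xx_pcie_readl_rc()/writel_rc() shows.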
Commit 40bd3a5f34 by Linus Torvalds, 2016-10-13 17:08:58 -07:00
29 changed files with 1108 additions and 1187 deletions

MAINTAINERS

@ -9189,6 +9189,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/versatile.txt
F: drivers/pci/host/pci-versatile.c
PCI DRIVER FOR ARMADA 8K
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/host/pcie-armada8k.c
PCI DRIVER FOR APPLIEDMICRO XGENE
M: Tanmay Inamdar <tinamdar@apm.com>
L: linux-pci@vger.kernel.org
@ -9235,6 +9243,7 @@ M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/host/pci-aardvark.c
PCI DRIVER FOR NVIDIA TEGRA

drivers/pci/host/pci-aardvark.c

@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
dev_info(&pcie->pdev->dev, "link up\n");
dev_info(dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
dev_err(&pcie->pdev->dev, "link never came up\n");
dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
else
str_posted = "Posted";
dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return 0;
}
dev_err(&pcie->pdev->dev, "config read/write timed out\n");
dev_err(dev, "config read/write timed out\n");
return -ETIMEDOUT;
}
@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
{
struct device *dev = &pcie->pdev->dev;
mutex_lock(&pcie->msi_used_lock);
if (!test_bit(hwirq, pcie->msi_irq_in_use))
dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
hwirq);
dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
else
clear_bit(hwirq, pcie->msi_irq_in_use);
mutex_unlock(&pcie->msi_used_lock);
@ -910,6 +913,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
@ -917,31 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device_node *msi_node;
int ret, irq;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pcie->base = devm_ioremap_resource(&pdev->dev, res);
pcie->base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to register interrupt\n");
dev_err(dev, "Failed to register interrupt\n");
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to parse resources\n");
dev_err(dev, "Failed to parse resources\n");
return ret;
}
@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
return ret;
}
ret = advk_pcie_init_msi_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
advk_pcie_remove_irq_domain(pcie);
return ret;
}
msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
if (msi_node)
msi = of_pci_find_msi_chip_by_node(msi_node);
else
msi = NULL;
bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
pcie, &pcie->resources, &pcie->msi);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
}

drivers/pci/host/pci-dra7xx.c

@ -64,11 +64,10 @@
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
void __iomem *base;
struct phy **phy;
int phy_count;
struct device *dev;
struct pcie_port pp;
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
};
#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
@ -84,17 +83,6 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
{
return readl(pp->dbi_base + offset);
}
static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
u32 value)
{
writel(value, pp->dbi_base + offset);
}
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@ -103,13 +91,14 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
return !!(reg & LINK_UP);
}
static int dra7xx_pcie_establish_link(struct pcie_port *pp)
static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
struct pcie_port *pp = &dra7xx->pp;
struct device *dev = pp->dev;
u32 reg;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link is already up\n");
dev_err(dev, "link is already up\n");
return 0;
}
@ -120,10 +109,8 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
return dw_pcie_wait_for_link(pp);
}
static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
~INTERRUPTS);
dra7xx_pcie_writel(dra7xx,
@ -142,6 +129,8 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
@ -149,10 +138,10 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
dra7xx_pcie_establish_link(pp);
dra7xx_pcie_establish_link(dra7xx);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
}
static struct pcie_host_ops dra7xx_pcie_host_ops = {
@ -196,8 +185,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
struct dra7xx_pcie *dra7xx = arg;
struct pcie_port *pp = &dra7xx->pp;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@ -223,51 +212,51 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
struct dra7xx_pcie *dra7xx = arg;
struct device *dev = dra7xx->pp.dev;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
if (reg & ERR_SYS)
dev_dbg(dra7xx->dev, "System Error\n");
dev_dbg(dev, "System Error\n");
if (reg & ERR_FATAL)
dev_dbg(dra7xx->dev, "Fatal Error\n");
dev_dbg(dev, "Fatal Error\n");
if (reg & ERR_NONFATAL)
dev_dbg(dra7xx->dev, "Non Fatal Error\n");
dev_dbg(dev, "Non Fatal Error\n");
if (reg & ERR_COR)
dev_dbg(dra7xx->dev, "Correctable Error\n");
dev_dbg(dev, "Correctable Error\n");
if (reg & ERR_AXI)
dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
dev_dbg(dev, "AXI tag lookup fatal Error\n");
if (reg & ERR_ECRC)
dev_dbg(dra7xx->dev, "ECRC Error\n");
dev_dbg(dev, "ECRC Error\n");
if (reg & PME_TURN_OFF)
dev_dbg(dra7xx->dev,
dev_dbg(dev,
"Power Management Event Turn-Off message received\n");
if (reg & PME_TO_ACK)
dev_dbg(dra7xx->dev,
dev_dbg(dev,
"Power Management Turn-Off Ack message received\n");
if (reg & PM_PME)
dev_dbg(dra7xx->dev,
"PM Power Management Event message received\n");
dev_dbg(dev, "PM Power Management Event message received\n");
if (reg & LINK_REQ_RST)
dev_dbg(dra7xx->dev, "Link Request Reset\n");
dev_dbg(dev, "Link Request Reset\n");
if (reg & LINK_UP_EVT)
dev_dbg(dra7xx->dev, "Link-up state change\n");
dev_dbg(dev, "Link-up state change\n");
if (reg & CFG_BME_EVT)
dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
if (reg & CFG_MSE_EVT)
dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
@ -278,13 +267,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
int ret;
struct pcie_port *pp;
struct pcie_port *pp = &dra7xx->pp;
struct device *dev = pp->dev;
struct resource *res;
struct device *dev = &pdev->dev;
pp = &dra7xx->pp;
pp->dev = dev;
pp->ops = &dra7xx_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
@ -292,12 +277,11 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, pp->irq,
dra7xx_pcie_msi_irq_handler,
ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"dra7-pcie-msi", pp);
"dra7-pcie-msi", dra7xx);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
dev_err(dev, "failed to request irq\n");
return ret;
}
@ -314,7 +298,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dra7xx->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -332,6 +316,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
void __iomem *base;
struct resource *res;
struct dra7xx_pcie *dra7xx;
struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
@ -343,6 +328,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!dra7xx)
return -ENOMEM;
pp = &dra7xx->pp;
pp->dev = dev;
pp->ops = &dra7xx_pcie_host_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource\n");
@ -390,7 +379,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->base = base;
dra7xx->phy = phy;
dra7xx->dev = dev;
dra7xx->phy_count = phy_count;
pm_runtime_enable(dev);
@ -407,7 +395,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
"pcie_reset");
if (ret) {
dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
dev_err(dev, "gpio%d request failed, ret %d\n",
gpio_sel, ret);
goto err_gpio;
}
@ -420,12 +408,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
platform_set_drvdata(pdev, dra7xx);
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
platform_set_drvdata(pdev, dra7xx);
return 0;
err_gpio:
@ -451,9 +438,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
u32 val;
/* clear MSE */
val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
@ -465,9 +452,9 @@ static int dra7xx_pcie_resume(struct device *dev)
u32 val;
/* set MSE */
val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}

drivers/pci/host/pci-exynos.c

@ -29,13 +29,13 @@
#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
struct exynos_pcie {
void __iomem *elbi_base;
void __iomem *phy_base;
void __iomem *block_base;
struct pcie_port pp;
void __iomem *elbi_base; /* DT 0th resource */
void __iomem *phy_base; /* DT 1st resource */
void __iomem *block_base; /* DT 2nd resource */
int reset_gpio;
struct clk *clk;
struct clk *bus_clk;
struct pcie_port pp;
};
/* PCIe ELBI registers */
@ -102,40 +102,40 @@ struct exynos_pcie {
#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->elbi_base + reg);
writel(val, exynos_pcie->elbi_base + reg);
}
static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->elbi_base + reg);
return readl(exynos_pcie->elbi_base + reg);
}
static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->phy_base + reg);
writel(val, exynos_pcie->phy_base + reg);
}
static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->phy_base + reg);
return readl(exynos_pcie->phy_base + reg);
}
static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->block_base + reg);
writel(val, exynos_pcie->block_base + reg);
}
static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->block_base + reg);
return readl(exynos_pcie->block_base + reg);
}
static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
bool on)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
@ -148,10 +148,10 @@ static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
}
}
static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
bool on)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
@ -164,10 +164,9 @@ static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
}
}
static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
@ -177,10 +176,9 @@ static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
@ -193,18 +191,14 @@ static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
}
static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
}
static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
@ -213,10 +207,9 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
static void exynos_pcie_power_on_phy(struct pcie_port *pp)
static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val &= ~PCIE_PHY_COMMON_PD_CMN;
@ -239,10 +232,9 @@ static void exynos_pcie_power_on_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_power_off_phy(struct pcie_port *pp)
static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val |= PCIE_PHY_COMMON_PD_CMN;
@ -265,10 +257,8 @@ static void exynos_pcie_power_off_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_init_phy(struct pcie_port *pp)
static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* DCC feedback control off */
exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
@ -305,51 +295,41 @@ static void exynos_pcie_init_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
static void exynos_pcie_assert_reset(struct pcie_port *pp)
static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
if (exynos_pcie->reset_gpio >= 0)
devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
GPIOF_OUT_INIT_HIGH, "RESET");
}
static int exynos_pcie_establish_link(struct pcie_port *pp)
static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
u32 val;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
dev_err(dev, "Link already up\n");
return 0;
}
/* assert reset signals */
exynos_pcie_assert_core_reset(pp);
exynos_pcie_assert_phy_reset(pp);
/* de-assert phy reset */
exynos_pcie_deassert_phy_reset(pp);
/* power on phy */
exynos_pcie_power_on_phy(pp);
/* initialize phy */
exynos_pcie_init_phy(pp);
exynos_pcie_assert_core_reset(exynos_pcie);
exynos_pcie_assert_phy_reset(exynos_pcie);
exynos_pcie_deassert_phy_reset(exynos_pcie);
exynos_pcie_power_on_phy(exynos_pcie);
exynos_pcie_init_phy(exynos_pcie);
/* pulse for common reset */
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
/* de-assert core reset */
exynos_pcie_deassert_core_reset(pp);
/* setup root complex */
exynos_pcie_deassert_core_reset(exynos_pcie);
dw_pcie_setup_rc(pp);
/* assert reset signal */
exynos_pcie_assert_reset(pp);
exynos_pcie_assert_reset(exynos_pcie);
/* assert LTSSM enable */
exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
@ -361,27 +341,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
dev_info(dev, "PLL Locked: 0x%x\n", val);
}
/* power off phy */
exynos_pcie_power_off_phy(pp);
exynos_pcie_power_off_phy(exynos_pcie);
return -ETIMEDOUT;
}
static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
}
static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* enable INTX interrupt */
val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
@ -391,23 +367,24 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct exynos_pcie *exynos_pcie = arg;
exynos_pcie_clear_irq_pulse(pp);
exynos_pcie_clear_irq_pulse(exynos_pcie);
return IRQ_HANDLED;
}
static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct exynos_pcie *exynos_pcie = arg;
struct pcie_port *pp = &exynos_pcie->pp;
return dw_handle_msi_irq(pp);
}
static void exynos_pcie_msi_init(struct pcie_port *pp)
static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
{
struct pcie_port *pp = &exynos_pcie->pp;
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
dw_pcie_msi_init(pp);
@ -417,60 +394,64 @@ static void exynos_pcie_msi_init(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
}
static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
{
exynos_pcie_enable_irq_pulse(pp);
exynos_pcie_enable_irq_pulse(exynos_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
exynos_pcie_msi_init(pp);
exynos_pcie_msi_init(exynos_pcie);
}
static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
void __iomem *dbi_base)
static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val;
exynos_pcie_sideband_dbi_r_mode(pp, true);
val = readl(dbi_base);
exynos_pcie_sideband_dbi_r_mode(pp, false);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
val = readl(pp->dbi_base + reg);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return val;
}
static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
u32 val, void __iomem *dbi_base)
static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
exynos_pcie_sideband_dbi_w_mode(pp, true);
writel(val, dbi_base);
exynos_pcie_sideband_dbi_w_mode(pp, false);
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
writel(val, pp->dbi_base + reg);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
}
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
exynos_pcie_sideband_dbi_r_mode(pp, true);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_r_mode(pp, false);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
exynos_pcie_sideband_dbi_w_mode(pp, true);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_w_mode(pp, false);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_link_up(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
u32 val;
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
if (val == PCIE_ELBI_LTSSM_ENABLE)
return 1;
@ -479,8 +460,10 @@ static int exynos_pcie_link_up(struct pcie_port *pp)
static void exynos_pcie_host_init(struct pcie_port *pp)
{
exynos_pcie_establish_link(pp);
exynos_pcie_enable_interrupts(pp);
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_pcie_establish_link(exynos_pcie);
exynos_pcie_enable_interrupts(exynos_pcie);
}
static struct pcie_host_ops exynos_pcie_host_ops = {
@ -492,36 +475,38 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
if (!pp->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
dev_err(dev, "failed to get irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
IRQF_SHARED, "exynos-pcie", pp);
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
IRQF_SHARED, "exynos-pcie", exynos_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
dev_err(dev, "failed to request irq\n");
return ret;
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
if (!pp->msi_irq) {
dev_err(&pdev->dev, "failed to get msi irq\n");
dev_err(dev, "failed to get msi irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"exynos-pcie", pp);
"exynos-pcie", exynos_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request msi irq\n");
dev_err(dev, "failed to request msi irq\n");
return ret;
}
}
@ -531,7 +516,7 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -540,37 +525,36 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_pcie *exynos_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct resource *elbi_base;
struct resource *phy_base;
struct resource *block_base;
int ret;
exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
GFP_KERNEL);
exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
if (!exynos_pcie)
return -ENOMEM;
pp = &exynos_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
exynos_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(exynos_pcie->clk)) {
dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
dev_err(dev, "Failed to get pcie rc clock\n");
return PTR_ERR(exynos_pcie->clk);
}
ret = clk_prepare_enable(exynos_pcie->clk);
if (ret)
return ret;
exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(exynos_pcie->bus_clk)) {
dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
dev_err(dev, "Failed to get pcie bus clock\n");
ret = PTR_ERR(exynos_pcie->bus_clk);
goto fail_clk;
}
@ -579,27 +563,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
goto fail_clk;
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
if (IS_ERR(exynos_pcie->elbi_base)) {
ret = PTR_ERR(exynos_pcie->elbi_base);
goto fail_bus_clk;
}
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(exynos_pcie->phy_base)) {
ret = PTR_ERR(exynos_pcie->phy_base);
goto fail_bus_clk;
}
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
if (IS_ERR(exynos_pcie->block_base)) {
ret = PTR_ERR(exynos_pcie->block_base);
goto fail_bus_clk;
}
ret = exynos_add_pcie_port(pp, pdev);
ret = exynos_add_pcie_port(exynos_pcie, pdev);
if (ret < 0)
goto fail_bus_clk;

drivers/pci/host/pci-imx6.c

@ -39,16 +39,15 @@ enum imx6_pcie_variants {
};
struct imx6_pcie {
struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
enum imx6_pcie_variants variant;
void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@ -96,14 +95,15 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
val = readl(dbi_base + PCIE_PHY_STAT);
val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
wait_counter++;
@ -116,123 +116,126 @@ static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
return -ETIMEDOUT;
}
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
int ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
return pcie_phy_poll_ack(dbi_base, 0);
return pcie_phy_poll_ack(imx6_pcie, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val, phy_ctl;
int ret;
ret = pcie_phy_wait_ack(dbi_base, addr);
ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
/* assert Read signal */
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = readl(dbi_base + PCIE_PHY_STAT);
val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
*data = val & 0xffff;
/* deassert Read signal */
writel(0x00, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);
return pcie_phy_poll_ack(dbi_base, 0);
return pcie_phy_poll_ack(imx6_pcie, 0);
}
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 var;
int ret;
/* write addr */
/* cap addr */
ret = pcie_phy_wait_ack(dbi_base, addr);
ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* capture data */
var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert cap data */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
/* assert wr signal */
var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack */
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert wr signal */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
writel(0x0, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);
return 0;
}
static void imx6_pcie_reset_phy(struct pcie_port *pp)
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u32 tmp;
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
/* Added for PCI abort handling */
@ -242,9 +245,9 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 0;
}
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
u32 val, gpr1, gpr12;
switch (imx6_pcie->variant) {
@ -281,10 +284,10 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
val = readl(pp->dbi_base + PCIE_PL_PFLR);
val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
writel(val, pp->dbi_base + PCIE_PL_PFLR);
dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@ -296,20 +299,19 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
return 0;
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret = 0;
switch (imx6_pcie->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_axi clock\n");
dev_err(dev, "unable to enable pcie_axi clock\n");
break;
}
@ -336,32 +338,33 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_phy clock\n");
goto err_pcie_phy;
dev_err(dev, "unable to enable pcie_phy clock\n");
return;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_bus clock\n");
dev_err(dev, "unable to enable pcie_bus clock\n");
goto err_pcie_bus;
}
ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie clock\n");
dev_err(dev, "unable to enable pcie clock\n");
goto err_pcie;
}
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie ref clock\n");
dev_err(dev, "unable to enable pcie ref clock\n");
goto err_ref_clk;
}
@ -392,7 +395,7 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
break;
}
return 0;
return;
err_ref_clk:
clk_disable_unprepare(imx6_pcie->pcie);
@ -400,14 +403,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
if (imx6_pcie->variant == IMX6SX)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
@ -439,45 +438,52 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
imx6_pcie->tx_swing_low << 25);
}
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
u32 tmp;
unsigned int retries;
for (retries = 0; retries < 200; retries++) {
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* Test if the speed change finished. */
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
return 0;
usleep_range(100, 1000);
}
dev_err(pp->dev, "Speed change timeout\n");
dev_err(dev, "Speed change timeout\n");
return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct imx6_pcie *imx6_pcie = arg;
struct pcie_port *pp = &imx6_pcie->pp;
return dw_handle_msi_irq(pp);
}
static int imx6_pcie_establish_link(struct pcie_port *pp)
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
u32 tmp;
int ret;
@ -486,76 +492,73 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(pp);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
dev_info(pp->dev, "Link never came up\n");
dev_info(dev, "Link never came up\n");
goto err_reset_phy;
}
if (imx6_pcie->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
} else {
dev_info(pp->dev, "Link: Gen2 disabled\n");
dev_info(dev, "Link: Gen2 disabled\n");
}
/*
* Start Directed Speed Change so the best possible speed both link
* partners support can be negotiated.
*/
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
ret = imx6_pcie_wait_for_speed_change(pp);
ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
/* Make sure link training is finished as well! */
ret = imx6_pcie_wait_for_link(pp);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
imx6_pcie_reset_phy(pp);
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
{
imx6_pcie_assert_core_reset(pp);
imx6_pcie_init_phy(pp);
imx6_pcie_deassert_core_reset(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(pp);
imx6_pcie_establish_link(imx6_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@ -563,7 +566,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
@ -572,24 +575,26 @@ static struct pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
static int __init imx6_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"mx6-pcie-msi", pp);
"mx6-pcie-msi", imx6_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@ -599,7 +604,7 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -608,75 +613,72 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
struct device_node *node = dev->of_node;
int ret;
imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
return -ENOMEM;
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
imx6_pcie->variant =
(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
(enum imx6_pcie_variants)of_device_get_match_data(dev);
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(np,
imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high ?
GPIOF_OUT_INIT_HIGH :
GPIOF_OUT_INIT_LOW,
"PCIe reset");
if (ret) {
dev_err(&pdev->dev, "unable to get reset gpio\n");
dev_err(dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pcie_phy)) {
dev_err(&pdev->dev,
"pcie_phy clock source missing or invalid\n");
dev_err(dev, "pcie_phy clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_phy);
}
imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus)) {
dev_err(&pdev->dev,
"pcie_bus clock source missing or invalid\n");
dev_err(dev, "pcie_bus clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_bus);
}
imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
imx6_pcie->pcie = devm_clk_get(dev, "pcie");
if (IS_ERR(imx6_pcie->pcie)) {
dev_err(&pdev->dev,
"pcie clock source missing or invalid\n");
dev_err(dev, "pcie clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie);
}
if (imx6_pcie->variant == IMX6SX) {
imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
dev_err(&pdev->dev,
dev_err(dev,
"pcie_incbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
@ -686,7 +688,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(&pdev->dev, "unable to find iomuxc registers\n");
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
@ -712,12 +714,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
ret = of_property_read_u32(node, "fsl,max-link-speed",
&imx6_pcie->link_gen);
if (ret)
imx6_pcie->link_gen = 1;
ret = imx6_add_pcie_port(pp, pdev);
ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
@ -730,7 +732,7 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
imx6_pcie_assert_core_reset(&imx6_pcie->pp);
imx6_pcie_assert_core_reset(imx6_pcie);
}
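The change that runs through this whole series is visible in the probe function above: instead of repeating &pdev->dev (or pp->dev) in every devm_*() and dev_*() call, each function caches it once in a local struct device pointer. A minimal before/after sketch, using a hypothetical driver rather than any of the files in this series:

    #include <linux/platform_device.h>

    struct foo { int dummy; };	/* placeholder private struct */

    /* before: every call spells out &pdev->dev */
    static int foo_probe_before(struct platform_device *pdev)
    {
    	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

    	if (!foo)
    		return -ENOMEM;
    	dev_info(&pdev->dev, "probed\n");
    	return 0;
    }

    /* after: one local pointer, shorter lines, a single point of change */
    static int foo_probe_after(struct platform_device *pdev)
    {
    	struct device *dev = &pdev->dev;
    	struct foo *foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);

    	if (!foo)
    		return -ENOMEM;
    	dev_info(dev, "probed\n");
    	return 0;
    }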
static const struct of_device_id imx6_pcie_of_match[] = {


@ -88,13 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
return ks_pcie->app.start + MSI_IRQ;
}
static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
}
static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
writel(val, ks_pcie->va_app_base + offset);
}
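These two helpers are the register accessors named in the changelog: the va_app_base dereference now lives in one place, and every call site shrinks to a struct pointer, an offset, and a value. The call-site transformation, restated (a summary of the hunks below, not a quoted hunk):

    /* before */
    val = readl(ks_pcie->va_app_base + CMD_STATUS);
    writel(val, ks_pcie->va_app_base + CMD_STATUS);

    /* after */
    val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
    ks_dw_app_writel(ks_pcie, CMD_STATUS, val);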
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 pending, vector;
int src, virq;
pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
/*
* MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@ -104,7 +115,7 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
if (BIT(src) & pending) {
vector = offset + (src << 3);
virq = irq_linear_revmap(pp->irq_domain, vector);
dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
src, vector, virq);
generic_handle_irq(virq);
}
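The vector arithmetic deserves a note: the status bits are interleaved with a stride of 8 rather than split into contiguous ranges, so status register offset n, bit src maps to vector n + 8 * src. A hypothetical helper (not part of the driver) makes the mapping explicit:

    /*
     * MSI0_IRQ_STATUS bits 0..3 -> vectors 0, 8, 16, 24
     * MSI1_IRQ_STATUS bits 0..3 -> vectors 1, 9, 17, 25, and so on
     */
    static inline int ks_msi_vector(int offset, int src)
    {
    	return offset + (src << 3);	/* e.g. offset 3, bit 2 -> vector 19 */
    }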
@ -124,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
BIT(bit_pos));
ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@ -135,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
BIT(bit_pos));
}
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@ -145,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
BIT(bit_pos));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@ -215,6 +226,7 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
struct device *dev = pp->dev;
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@ -222,7 +234,7 @@ int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
dev_err(dev, "irq domain init failed\n");
return -ENXIO;
}
@ -237,47 +249,47 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
int i;
for (i = 0; i < MAX_LEGACY_IRQS; i++)
writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 pending;
int virq;
pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
virq);
dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
generic_handle_irq(virq);
}
/* EOI the INTx interrupt */
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base)
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
u32 status;
status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
if (!status)
return IRQ_NONE;
if (status & ERR_FATAL_IRQ)
dev_err(dev, "fatal error (status %#010x)\n", status);
dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
status);
/* Ack the IRQ; status bits are RW1C */
writel(status, reg_base + ERR_IRQ_STATUS);
ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
return IRQ_HANDLED;
}
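The RW1C note above explains why the raw status value is written straight back: in a write-1-to-clear register, writing the snapshot that was just read clears exactly the bits that were handled and leaves any bit that became pending in the meantime still armed. The generic idiom, independent of this IP (names are placeholders):

    static void ack_w1c(void __iomem *base, u32 status_reg)
    {
    	u32 status = readl(base + status_reg);	/* snapshot pending bits */

    	/* ... handle the bits set in status ... */

    	writel(status, base + status_reg);	/* W1C: clears only those bits */
    }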
@ -322,15 +334,15 @@ static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
do {
val = readl(reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (!(val & DBI_CS2_EN_VAL));
}
@ -340,15 +352,15 @@ static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
do {
val = readl(reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (val & DBI_CS2_EN_VAL);
}
@ -357,28 +369,29 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
struct pcie_port *pp = &ks_pcie->pp;
u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
u32 val;
/* Disable BARs for inbound access */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
ks_dw_pcie_clear_dbi_mode(ks_pcie);
/* Set outbound translation size per window division */
writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
/* Using Direct 1:1 mapping of RC <-> PCI memory space */
for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
start += tr_size;
}
/* Enable OB translation */
writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
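For reference, the window arithmetic: the low three bits of CFG_PCIM_WIN_SZ_IDX select a per-window size of 2^idx MiB, and the loop tiles identity-mapped windows across the root complex memory aperture; judging by the `start | 1` write, bit 0 of OB_OFFSET_INDEX is presumably the window-enable bit. A worked example, assuming idx = 3:

    /* idx = 3 -> tr_size = (1 << 3) * SZ_1M = 8 MiB per window          */
    /* a 256 MiB aperture therefore needs 32 windows (capped by          */
    /* CFG_PCIM_WIN_CNT); window i is programmed as:                     */
    /*     OB_OFFSET_INDEX(i) = (start + i * SZ_8M) | 1   (enable)       */
    /*     OB_OFFSET_HI(i)    = 0                  (32-bit CPU address)  */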
/**
@ -418,7 +431,7 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
if (bus != 1)
regval |= BIT(24);
writel(regval, ks_pcie->va_app_base + CFG_SETUP);
ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
return pp->va_cfg0_base;
}
@ -456,19 +469,19 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
/* Configure and set up BAR0 */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_set_dbi_mode(ks_pcie);
/* Enable BAR0 */
writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_clear_dbi_mode(ks_pcie);
/*
* For BAR0, just setting bus address for inbound writes (MSI) should
* be sufficient. Use physical address to avoid any conflicts.
*/
writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
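The double write to PCI_BASE_ADDRESS_0 only makes sense together with the dbi_cs2 bracket around it: on this DesignWare-based core, DBI accesses made while dbi_cs2 is set presumably land in the BAR mask/shadow registers rather than the BAR itself, so the first pair of writes enables BAR0 and sizes it to 4 KiB, while the final write, after dbi_cs2 is cleared, programs the live BAR with the inbound (MSI) bus address. Annotated restatement of the sequence (the mask-register behavior is an assumption about the DW core, not stated in this file):

    ks_dw_pcie_set_dbi_mode(ks_pcie);		/* BAR writes hit mask regs? */
    dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);		/* enable BAR0 */
    dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);	/* 4 KiB size */
    ks_dw_pcie_clear_dbi_mode(ks_pcie);		/* back to the real BAR */
    dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);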
/**
@ -476,8 +489,9 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
*/
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
u32 val = readl(pp->dbi_base + DEBUG0);
u32 val;
val = dw_pcie_readl_rc(pp, DEBUG0);
return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
@ -486,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
u32 val;
/* Disable Link training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
/* Initiate Link Training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
/**
@ -506,12 +520,13 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np)
{
struct pcie_port *pp = &ks_pcie->pp;
struct platform_device *pdev = to_platform_device(pp->dev);
struct device *dev = pp->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(pp->dev, res);
pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
@ -524,7 +539,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 1 is the application reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ks_pcie->va_app_base))
return PTR_ERR(ks_pcie->va_app_base);
@ -537,7 +552,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
dev_err(dev, "Failed to add irq domain for legacy irqs\n");
return -EINVAL;
}


@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
unsigned int retries;
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
dev_err(dev, "Link already up\n");
return 0;
}
@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return 0;
}
dev_err(pp->dev, "phy link never came up\n");
dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
}
@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
/*
* The chained irq handler installation would have replaced normal
@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
dev_dbg(dev, ": Handling legacy irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@ -234,7 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
ks_dw_pcie_enable_error_irq(ks_pcie);
}
/*
@ -302,14 +305,14 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
ks_pcie->va_app_base);
return ks_dw_pcie_handle_error_irq(ks_pcie);
}
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
int ret;
ret = ks_pcie_get_irq_controller_info(ks_pcie,
@ -332,12 +335,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
*/
ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
if (ks_pcie->error_irq <= 0)
dev_info(&pdev->dev, "no error IRQ defined\n");
dev_info(dev, "no error IRQ defined\n");
else {
ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request error IRQ %d\n",
dev_err(dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
}
@ -347,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -381,12 +384,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct phy *phy;
int ret;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
pp = &ks_pcie->pp;
pp->dev = dev;
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
@ -408,7 +411,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");


@ -17,8 +17,8 @@
#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
struct pcie_port pp; /* pp.dbi_base is DT 0th res */
struct clk *clk;
struct pcie_port pp;
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
@ -34,7 +34,7 @@ struct keystone_pcie {
int error_irq;
/* Application register space */
void __iomem *va_app_base;
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
};
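Both Keystone files recover the private struct from the embedded pcie_port with to_keystone_pcie(pp); with pp now the first member and carrying the dbi base, that helper is presumably the usual container_of() wrapper:

    #define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)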
@ -45,9 +45,8 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base);
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,


@ -45,10 +45,9 @@ struct ls_pcie_drvdata {
};
struct ls_pcie {
void __iomem *dbi;
struct pcie_port pp; /* pp.dbi_base is DT regs */
void __iomem *lut;
struct regmap *scfg;
struct pcie_port pp;
const struct ls_pcie_drvdata *drvdata;
int index;
};
@ -59,7 +58,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
{
u32 header_type;
header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE);
header_type &= 0x7f;
return header_type == PCI_HEADER_TYPE_BRIDGE;
@ -68,13 +67,13 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE);
}
/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE);
}
/* Drop MSG TLP except for Vendor MSG */
@ -82,9 +81,9 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
u32 val;
val = ioread32(pcie->dbi + PCIE_STRFMR1);
val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);
val &= 0xDFFFFFFF;
iowrite32(val, pcie->dbi + PCIE_STRFMR1);
iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
}
static int ls1021_pcie_link_up(struct pcie_port *pp)
@ -106,18 +105,19 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
struct device *dev = pp->dev;
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 index[2];
pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,pcie-scfg");
if (IS_ERR(pcie->scfg)) {
dev_err(pp->dev, "No syscfg phandle specified\n");
dev_err(dev, "No syscfg phandle specified\n");
pcie->scfg = NULL;
return;
}
if (of_property_read_u32_array(pp->dev->of_node,
if (of_property_read_u32_array(dev->of_node,
"fsl,pcie-scfg", index, 2)) {
pcie->scfg = NULL;
return;
@ -148,18 +148,19 @@ static void ls_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
ls_pcie_drop_msg_tlp(pcie);
iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
}
static int ls_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip)
{
struct device *dev = pp->dev;
struct device_node *np = dev->of_node;
struct device_node *msi_node;
struct device_node *np = pp->dev->of_node;
/*
* The MSI domain is set by the generic of_msi_configure(). This
@ -169,7 +170,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
*/
msi_node = of_parse_phandle(np, "msi-parent", 0);
if (!msi_node) {
dev_err(pp->dev, "failed to find msi-parent\n");
dev_err(dev, "failed to find msi-parent\n");
return -EINVAL;
}
@ -212,19 +213,15 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
static int __init ls_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int __init ls_add_pcie_port(struct ls_pcie *pcie)
{
struct pcie_port *pp = &pcie->pp;
struct device *dev = pp->dev;
int ret;
struct ls_pcie *pcie = to_ls_pcie(pp);
pp->dev = &pdev->dev;
pp->dbi_base = pcie->dbi;
pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(pp->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -233,38 +230,42 @@ static int __init ls_add_pcie_port(struct pcie_port *pp,
static int __init ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct ls_pcie *pcie;
struct pcie_port *pp;
struct resource *dbi_base;
int ret;
match = of_match_device(ls_pcie_of_match, &pdev->dev);
match = of_match_device(ls_pcie_of_match, dev);
if (!match)
return -ENODEV;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pp = &pcie->pp;
pp->dev = dev;
pp->ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pcie->dbi)) {
dev_err(&pdev->dev, "missing *regs* space\n");
return PTR_ERR(pcie->dbi);
pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pcie->pp.dbi_base)) {
dev_err(dev, "missing *regs* space\n");
return PTR_ERR(pcie->pp.dbi_base);
}
pcie->drvdata = match->data;
pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
ret = ls_add_pcie_port(&pcie->pp, pdev);
ret = ls_add_pcie_port(pcie);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, pcie);
return 0;
}
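ls_pcie_of_match, whose entries are cut off above, is a standard of_device_id table: each compatible string carries a pointer to its per-SoC ls_pcie_drvdata in .data, which is what of_match_device() hands back to probe. The shape, sketched with an illustrative entry (field values omitted):

    static const struct ls_pcie_drvdata ls1021_drvdata = {
    	/* per-SoC .ops, .lut_offset, ... */
    };

    static const struct of_device_id ls_pcie_of_match[] = {
    	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
    	{ },
    };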


@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(&pdev->dev, "invalid memory aperture size\n");
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
if (ret) {
dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
ret);
dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
num = of_get_available_child_count(pdev->dev.of_node);
num = of_get_available_child_count(np);
pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
GFP_KERNEL);
pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
for_each_available_child_of_node(pdev->dev.of_node, child) {
for_each_available_child_of_node(np, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
ret = mvebu_pcie_parse_port(pcie, port, child);
@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "%s: cannot map registers\n",
port->name);
dev_err(dev, "%s: cannot map registers\n", port->name);
port->base = NULL;
mvebu_pcie_powerdown(port);
continue;


@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
dev_err(priv->dev, "error irq: status %08x\n", status);
dev_err(dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
struct device *dev = priv->dev;
int ret;
u32 val;
ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
dev_err(priv->dev, "cannot claim IRQ for error handling\n");
dev_err(dev, "cannot claim IRQ for error handling\n");
return;
}
@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
struct device *dev = priv->dev;
void __iomem *reg = priv->reg;
u32 val;
int ret;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->mem_res);
ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
ret = devm_request_pci_bus_resources(dev, &sys->resources);
if (ret < 0)
return ret;
@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
struct device *dev = pci->dev;
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
/* Catch HW limitations */
if (!(range.flags & IORESOURCE_PREFETCH)) {
dev_err(pci->dev, "window must be prefetchable\n");
dev_err(dev, "window must be prefetchable\n");
return -EINVAL;
}
if (pci->window_addr) {
u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
if (lowaddr < pci->window_size) {
dev_err(pci->dev, "invalid window size/addr\n");
dev_err(dev, "invalid window size/addr\n");
return -EINVAL;
}
}
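The alignment check is compact enough to misread: ffs() returns the position of the lowest set bit, so lowaddr is the largest power of two that divides window_addr, i.e. the window base's natural alignment, and the window is rejected if it is larger than that alignment. Two worked cases:

    /* window_addr = 0x40000000: lowest set bit = 2^30 (1 GiB)
     *     -> any window_size up to SZ_1G passes
     * window_addr = 0x40100000: lowest set bit = 2^20 (1 MiB)
     *     -> window_size = SZ_1G fails: the base is only 1 MiB-aligned
     */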
@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
static int rcar_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
reg = devm_ioremap_resource(dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
if (mem_res->start & 0xFFFF)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct rcar_pci_priv), GFP_KERNEL);
priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
priv->dev = &pdev->dev;
priv->dev = dev;
if (priv->irq < 0) {
dev_err(&pdev->dev, "no valid irq found\n");
dev_err(dev, "no valid irq found\n");
return priv->irq;
}
@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_pci = 0x40000000;
priv->window_size = SZ_1G;
if (pdev->dev.of_node) {
if (dev->of_node) {
struct resource busnr;
int ret;
ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
ret = of_pci_parse_bus_range(dev->of_node, &busnr);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse bus-range\n");
dev_err(dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
dev_warn(&pdev->dev, "only one bus number supported\n");
dev_warn(dev, "only one bus number supported\n");
ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse dma-range\n");
dev_err(dev, "failed to parse dma-range\n");
return ret;
}
} else {
@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
pci_common_init_dev(&pdev->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}


@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
err);
dev_err(dev, "ioremap_page_range() failed: %d\n", err);
goto unmap;
}
}
@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
if (bus->number == 0) {
@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
addr = (void __iomem *)b->area->addr;
if (!addr) {
dev_err(pcie->dev,
"failed to map cfg. space for bus %u\n",
dev_err(dev, "failed to map cfg. space for bus %u\n",
bus->number);
return NULL;
}
@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
struct tegra_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
devm_iounmap(pcie->dev, port->base);
devm_release_mem_region(pcie->dev, port->regs.start,
devm_iounmap(dev, port->base);
devm_release_mem_region(dev, port->regs.start,
resource_size(&port->regs));
list_del(&port->list);
devm_kfree(pcie->dev, port);
devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
@ -612,12 +613,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
struct device *dev = pcie->dev;
int err;
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
err = devm_request_resource(dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
@ -631,7 +633,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
err = devm_request_pci_bus_resources(dev, &sys->resources);
if (err < 0)
return err;
@ -672,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
struct device *dev = pcie->dev;
u32 code, signature;
code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
@ -689,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
code == AFI_INTR_FPCI_DECODE_ERROR) {
@ -701,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
dev_dbg(dev, " FPCI address: %10llx\n", address);
else
dev_err(pcie->dev, " FPCI address: %10llx\n", address);
dev_err(dev, " FPCI address: %10llx\n", address);
}
return IRQ_HANDLED;
@ -793,6 +794,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
@ -829,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
dev_err(dev, "PLL failed to lock: %d\n", err);
return err;
}
@ -859,7 +861,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
pads_writel(pcie, PADS_CTL, value);
pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
@ -880,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
for (i = 0; i < port->lanes; i++) {
err = phy_power_on(port->phys[i]);
if (err < 0) {
dev_err(dev, "failed to power on PHY#%u: %d\n", i,
err);
dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
return err;
}
}
@ -909,6 +910,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@ -920,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
err = tegra_pcie_phy_enable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
dev_err(dev, "failed to power on PHY: %d\n", err);
return err;
}
@ -928,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_on(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power on PCIe port %u PHY: %d\n",
port->index, err);
return err;
@ -946,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port;
int err;
@ -956,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
err = tegra_pcie_phy_disable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY: %d\n",
err);
dev_err(dev, "failed to power off PHY: %d\n", err);
return err;
}
@ -965,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_off(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power off PCIe port %u PHY: %d\n",
port->index, err);
return err;
@ -977,6 +979,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
@ -1016,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
dev_err(dev, "failed to power on PHY(s): %d\n", err);
return err;
}
@ -1049,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
/* TODO: disable and unprepare clocks? */
err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
dev_err(dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
@ -1065,11 +1069,12 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
int err;
@ -1082,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
dev_err(dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
pcie->pex_rst);
if (err) {
dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
dev_err(dev, "powerup sequence failed: %d\n", err);
return err;
}
@ -1096,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
dev_err(dev, "failed to enable AFI clock: %d\n", err);
return err;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable CML clock: %d\n",
err);
dev_err(dev, "failed to enable CML clock: %d\n", err);
return err;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
dev_err(dev, "failed to enable PLLE clock: %d\n", err);
return err;
}
@ -1120,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
pcie->pex_clk = devm_clk_get(dev, "pex");
if (IS_ERR(pcie->pex_clk))
return PTR_ERR(pcie->pex_clk);
pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
pcie->afi_clk = devm_clk_get(dev, "afi");
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
pcie->pll_e = devm_clk_get(dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
if (soc->has_cml_clk) {
pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
pcie->cml_clk = devm_clk_get(dev, "cml");
if (IS_ERR(pcie->cml_clk))
return PTR_ERR(pcie->cml_clk);
}
@ -1145,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
struct device *dev = pcie->dev;
pcie->pex_rst = devm_reset_control_get(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
pcie->afi_rst = devm_reset_control_get(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
@ -1162,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
pcie->phy = devm_phy_optional_get(dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
dev_err(pcie->dev, "failed to get PHY: %d\n", err);
dev_err(dev, "failed to get PHY: %d\n", err);
return err;
}
err = phy_init(pcie->phy);
if (err < 0) {
dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
dev_err(dev, "failed to initialize PHY: %d\n", err);
return err;
}
@ -1256,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
int err;
err = tegra_pcie_clocks_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
dev_err(dev, "failed to get clocks: %d\n", err);
return err;
}
err = tegra_pcie_resets_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get resets: %d\n", err);
dev_err(dev, "failed to get resets: %d\n", err);
return err;
}
err = tegra_pcie_phys_get(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
dev_err(dev, "failed to get PHYs: %d\n", err);
return err;
}
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
dev_err(dev, "failed to power up: %d\n", err);
return err;
}
pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
pcie->pads = devm_ioremap_resource(dev, pads);
if (IS_ERR(pcie->pads)) {
err = PTR_ERR(pcie->pads);
goto poweroff;
}
afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
pcie->afi = devm_ioremap_resource(dev, afi);
if (IS_ERR(pcie->afi)) {
err = PTR_ERR(pcie->afi);
goto poweroff;
@ -1305,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
goto poweroff;
}
pcie->cs = devm_request_mem_region(pcie->dev, res->start,
pcie->cs = devm_request_mem_region(dev, res->start,
resource_size(res), res->name);
if (!pcie->cs) {
err = -EADDRNOTAVAIL;
@ -1315,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
/* request interrupt */
err = platform_get_irq_byname(pdev, "intr");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto poweroff;
}
@ -1323,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
if (err) {
dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
dev_err(dev, "failed to register IRQ: %d\n", err);
goto poweroff;
}
@ -1336,6 +1345,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
if (pcie->irq > 0)
@ -1345,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
err = phy_exit(pcie->phy);
if (err < 0)
dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
dev_err(dev, "failed to teardown PHY: %d\n", err);
return 0;
}
@ -1384,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
struct tegra_pcie *pcie = data;
struct device *dev = pcie->dev;
struct tegra_msi *msi = &pcie->msi;
unsigned int i, processed = 0;
@ -1403,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
dev_info(pcie->dev, "unhandled MSI\n");
dev_info(dev, "unhandled MSI\n");
} else {
/*
* that's weird who triggered this?
* just clear it
*/
dev_info(pcie->dev, "unexpected MSI\n");
dev_info(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@ -1488,7 +1499,8 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
@ -1497,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.dev = dev;
msi->chip.setup_irq = tegra_msi_setup_irq;
msi->chip.teardown_irq = tegra_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto err;
}
@ -1519,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@ -1594,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(pcie->dev, "4x1, 1x1 configuration\n");
dev_info(dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;
case 0x0000102:
dev_info(pcie->dev, "2x1, 1x1 configuration\n");
dev_info(dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
dev_info(pcie->dev, "4x1, 2x1 configuration\n");
dev_info(dev, "4x1, 2x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
return 0;
case 0x00020202:
dev_info(pcie->dev, "2x3 configuration\n");
dev_info(dev, "2x3 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
return 0;
case 0x00010104:
dev_info(pcie->dev, "4x1, 1x2 configuration\n");
dev_info(dev, "4x1, 1x2 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
switch (lanes) {
case 0x00000004:
dev_info(pcie->dev, "single-mode configuration\n");
dev_info(dev, "single-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
return 0;
case 0x00000202:
dev_info(pcie->dev, "dual-mode configuration\n");
dev_info(dev, "dual-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
return 0;
}
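The magic numbers in this switch are packed per-port lane counts taken from DT: each byte of lanes appears to hold one root port's nvidia,num-lanes value, with port 0 in the low byte. Under that assumption the cases read naturally:

    /* packing (assumed): lanes |= num_lanes << (port_index << 3)      */
    /* 0x00000204: port0 = x4, port1 = x2          -> "4x1, 2x1"       */
    /* 0x00020202: port0 = port1 = port2 = x2      -> "2x3"            */
    /* 0x00010104: port0 = x4, port1 = port2 = x1  -> "4x1, 1x2"       */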
@ -1673,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np,
*/
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
pcie->num_supplies = 3;
@ -1681,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
pcie->num_supplies = 2;
if (pcie->num_supplies == 0) {
dev_err(pcie->dev, "device %s not supported in legacy mode\n",
dev_err(dev, "device %s not supported in legacy mode\n",
np->full_name);
return -ENODEV;
}
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1698,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
if (pcie->num_supplies > 2)
pcie->supplies[2].supply = "avdd";
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
pcie->supplies);
return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
/*
@ -1713,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
*/
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
unsigned int i = 0;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1746,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
(need_pexb ? 2 : 0);
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1769,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
pcie->num_supplies = 5;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1782,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->supplies[4].supply = "vddio-pex-clk";
}
if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
pcie->num_supplies))
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
return devm_regulator_bulk_get(dev, pcie->num_supplies,
pcie->supplies);
/*
@ -1792,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
* that the device tree complies with an older version of the device
* tree binding.
*/
dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
dev_info(dev, "using legacy DT binding for power supplies\n");
devm_kfree(pcie->dev, pcie->supplies);
devm_kfree(dev, pcie->supplies);
pcie->num_supplies = 0;
return tegra_pcie_get_legacy_regulators(pcie);
@ -1802,7 +1816,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node, *port;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
@ -1812,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
int err;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
@ -1867,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
err);
dev_err(dev, "failed to parse ranges property: %d\n", err);
pcie->busn.name = np->name;
pcie->busn.start = 0;
pcie->busn.end = 0xff;
@ -1883,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
dev_err(pcie->dev, "invalid port number: %d\n", index);
dev_err(dev, "invalid port number: %d\n", index);
return -EINVAL;
}
@ -1899,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
if (err < 0) {
dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
dev_err(dev, "failed to parse # of lanes: %d\n",
err);
return err;
}
if (value > 16) {
dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
dev_err(dev, "invalid # of lanes: %u\n", value);
return -EINVAL;
}
@ -1919,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
mask |= ((1 << value) - 1) << lane;
lane += value;
rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
@ -1936,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
rp->base = devm_ioremap_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
@ -1945,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
if (err < 0) {
dev_err(pcie->dev, "invalid lane configuration\n");
dev_err(dev, "invalid lane configuration\n");
return err;
}
@ -1964,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
struct device *dev = port->pcie->dev;
unsigned int retries = 3;
unsigned long value;
@ -1986,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
dev_err(port->pcie->dev, "link %u down, retrying\n",
port->index);
dev_err(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@ -2011,11 +2023,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(pcie->dev, "probing port %u, using %u lanes\n",
dev_info(dev, "probing port %u, using %u lanes\n",
port->index, port->lanes);
tegra_pcie_port_enable(port);
@ -2023,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
if (tegra_pcie_port_check_link(port))
continue;
dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
dev_info(dev, "link %u down, ignoring\n", port->index);
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
@ -2041,8 +2054,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.map_irq = tegra_pcie_map_irq;
hw.ops = &tegra_pcie_ops;
pci_common_init_dev(pcie->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}
@ -2204,17 +2216,18 @@ static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_pcie *pcie;
int err;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->soc = of_device_get_match_data(&pdev->dev);
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
pcie->dev = &pdev->dev;
pcie->dev = dev;
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
@ -2222,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
@ -2236,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n",
err);
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto put_resources;
}
}
err = tegra_pcie_enable(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
dev_err(dev, "failed to enable PCIe ports: %d\n", err);
goto disable_msi;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
err);
dev_err(dev, "failed to setup debugfs: %d\n", err);
}
platform_set_drvdata(pdev, pcie);
return 0;
disable_msi:


@ -76,6 +76,16 @@ struct xgene_pcie_port {
u32 version;
};
static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
return readl(port->csr_base + reg);
}
static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
writel(val, port->csr_base + reg);
}
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
writel(rtdid_val, port->csr_base + RTDID);
xgene_pcie_writel(port, RTDID, rtdid_val);
/* read the register back to ensure flush */
readl(port->csr_base + RTDID);
xgene_pcie_readl(port, RTDID);
}
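The trailing read is the standard flush for posted MMIO writes: writel() can return while the write is still buffered on its way to the device, and a read from the same device cannot complete until every prior posted write has landed. The bare idiom (names are placeholders):

    static void flush_posted_write(void __iomem *base, u32 reg, u32 val)
    {
    	writel(val, base + reg);	/* posted: may still be in flight */
    	readl(base + reg);		/* read back forces completion */
    }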
/*
@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
val32 = readl(csr_base + addr);
val32 = xgene_pcie_readl(port, addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
writel(val, csr_base + addr);
xgene_pcie_writel(port, addr, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x08);
val32 = xgene_pcie_readl(port, addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x08);
xgene_pcie_writel(port, addr + 0x08, val);
return mask;
}
@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
val32 = readl(csr_base + BRIDGE_STATUS_0);
val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
int rc;
port->clk = clk_get(port->dev, NULL);
port->clk = clk_get(dev, NULL);
if (IS_ERR(port->clk)) {
dev_err(port->dev, "clock not available\n");
dev_err(dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
dev_err(port->dev, "clock enable failed\n");
dev_err(dev, "clock enable failed\n");
return rc;
}
@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
struct device *dev = port->dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
port->csr_base = devm_ioremap_resource(port->dev, res);
port->csr_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
port->cfg_base = devm_ioremap_resource(port->dev, res);
port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
void __iomem *base = port->csr_base + offset;
struct device *dev = port->dev;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
if (size >= min_size)
mask = ~(size - 1) | flag;
else
dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
writel(lower_32_bits(cpu_addr), base);
writel(upper_32_bits(cpu_addr), base + 0x04);
writel(lower_32_bits(mask), base + 0x08);
writel(upper_32_bits(mask), base + 0x0c);
writel(lower_32_bits(pci_addr), base + 0x10);
writel(upper_32_bits(pci_addr), base + 0x14);
xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
writel(lower_32_bits(addr), csr_base + CFGBARL);
writel(upper_32_bits(addr), csr_base + CFGBARH);
writel(EN_REG, csr_base + CFGCTL);
u64 addr = port->cfg_addr;
xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct resource *res = window->res;
u64 restype = resource_type(res);
dev_dbg(port->dev, "%pR\n", res);
dev_dbg(dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return -EINVAL;
}
}
xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
xgene_pcie_setup_cfg_reg(port);
return 0;
}
static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
u64 pim, u64 size)
{
writel(lower_32_bits(pim), addr);
writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
writel(lower_32_bits(size), addr + 0x10);
writel(upper_32_bits(size), addr + 0x14);
xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
xgene_pcie_writel(port, pim_reg + 0x04,
upper_32_bits(pim) | EN_COHERENCY);
xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}
/*
@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
void *pim_addr;
u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(port->dev, "invalid pcie dma-range config\n");
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
pim_addr = csr_base + PIM1_1L;
pim_reg = PIM1_1L;
break;
case 1:
bar_addr = csr_base + IBAR2;
writel(bar_low, bar_addr);
writel(lower_32_bits(mask), csr_base + IR2MSK);
pim_addr = csr_base + PIM2_1L;
xgene_pcie_writel(port, IBAR2, bar_low);
xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
pim_reg = PIM2_1L;
break;
case 2:
bar_addr = csr_base + IBAR3L;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
writel(lower_32_bits(mask), csr_base + IR3MSKL);
writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
pim_addr = csr_base + PIM3_1L;
xgene_pcie_writel(port, IBAR3L, bar_low);
xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
pim_reg = PIM3_1L;
break;
}
xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
writel(0x0, port->csr_base + i);
xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
int ret;
@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
writel(val, port->csr_base + BRIDGE_CFG_0);
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
dev_info(port->dev, "(rc) link down\n");
dev_info(dev, "(rc) link down\n");
else
dev_info(port->dev, "(rc) x%d gen-%d link up\n",
lanes, speed + 1);
dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->node = of_node_get(pdev->dev.of_node);
port->dev = &pdev->dev;
port->node = of_node_get(dn);
port->dev = dev;
port->version = XGENE_PCIE_IP_VER_UNKN;
if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
ret = devm_request_pci_bus_resources(&pdev->dev, &res);
ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
goto error;
@ -548,8 +562,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, port);
return 0;
error:

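The X-Gene accessor pattern above can be exercised outside the kernel. Below is a user-space model in which the CSR space is faked with an array so the code actually runs; the RTDID offset is illustrative and everything around it is scaffolding, not the real driver:

#include <stdint.h>
#include <stdio.h>

#define RTDID 0x140                     /* illustrative register offset */

struct xgene_pcie_port {
        uint8_t *csr_base;
};

static uint32_t xgene_pcie_readl(struct xgene_pcie_port *port, uint32_t reg)
{
        return *(volatile uint32_t *)(port->csr_base + reg);
}

static void xgene_pcie_writel(struct xgene_pcie_port *port, uint32_t reg,
                              uint32_t val)
{
        *(volatile uint32_t *)(port->csr_base + reg) = val;
}

int main(void)
{
        static uint8_t csr[0x1000];
        struct xgene_pcie_port port = { .csr_base = csr };

        /* call sites shrink from writel(val, port->csr_base + REG) */
        xgene_pcie_writel(&port, RTDID, (1 << 8) | (2 << 3) | 3);
        printf("RTDID = 0x%x\n", xgene_pcie_readl(&port, RTDID));
        return 0;
}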

@ -55,15 +55,19 @@
#define TLP_PAYLOAD_SIZE 0x01
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, bus) \
((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
: TLP_FMTTYPE_CFGRD1) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
#define RP_DEVFN 0
#define LINK_UP_TIMEOUT HZ
#define LINK_RETRAIN_TIMEOUT HZ
@ -74,7 +78,7 @@
struct altera_pcie {
struct platform_device *pdev;
void __iomem *cra_base;
void __iomem *cra_base; /* DT Cra */
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
@ -131,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
@ -218,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
{
u32 headers[TLP_HDR_SIZE];
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_READ_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
tlp_write_packet(pcie, headers, 0, false);
@ -238,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_WRITE_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
/* check alignment to Qword */
@ -342,7 +336,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
*value = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@ -359,7 +353,7 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
@ -394,6 +388,7 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u16 reg16;
unsigned long start_jiffies;
@ -406,7 +401,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link retrain timeout\n");
dev_err(dev, "link retrain timeout\n");
break;
}
udelay(100);
@ -419,7 +414,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link up timeout\n");
dev_err(dev, "link up timeout\n");
break;
}
udelay(100);
@ -460,7 +455,6 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
@ -472,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_pcie *pcie;
struct device *dev;
unsigned long status;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
dev = &pcie->pdev->dev;
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
@ -489,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
if (virq)
generic_handle_irq(virq);
else
dev_err(&pcie->pdev->dev,
"unexpected IRQ, INT%d\n", bit);
dev_err(dev, "unexpected IRQ, INT%d\n", bit);
}
}
@ -549,30 +544,25 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
struct resource *cra;
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct resource *cra;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
if (!cra) {
dev_err(&pdev->dev, "no Cra memory resource defined\n");
return -ENODEV;
}
pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base)) {
dev_err(&pdev->dev, "failed to map cra memory\n");
dev_err(dev, "failed to map cra memory\n");
return PTR_ERR(pcie->cra_base);
}
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
return -EINVAL;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
return 0;
}
@ -583,12 +573,13 @@ static void altera_pcie_host_init(struct altera_pcie *pcie)
static int altera_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
int ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@ -596,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_dt(pcie);
if (ret) {
dev_err(&pdev->dev, "Parsing DT failed\n");
dev_err(dev, "Parsing DT failed\n");
return ret;
}
@ -604,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed add resources\n");
dev_err(dev, "Failed add resources\n");
return ret;
}
ret = altera_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
dev_err(dev, "Failed creating IRQ Domain\n");
return ret;
}
@ -620,7 +611,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
if (!bus)
return -ENOMEM;
@ -633,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return ret;
}

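The net effect of the TLP_CFG_DW0 change is that the type-0 vs type-1 choice now rides inside the macro instead of at every call site. A user-space model of just that selection logic; the FMTTYPE constants mirror the driver's values and the parenthesization is tightened for the sketch:

#include <stdint.h>
#include <stdio.h>

#define TLP_PAYLOAD_SIZE        0x01
#define TLP_FMTTYPE_CFGRD0      0x04    /* type 0 config read (root bus) */
#define TLP_FMTTYPE_CFGRD1      0x05    /* type 1 config read (below root) */

struct altera_pcie {
        uint8_t root_bus_nr;
};

#define TLP_CFG_DW0(pcie, bus) \
        (((((bus) == (pcie)->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
                                          : TLP_FMTTYPE_CFGRD1) << 24) | \
         TLP_PAYLOAD_SIZE)

int main(void)
{
        struct altera_pcie pcie = { .root_bus_nr = 0 };

        printf("bus 0: 0x%08x\n", (unsigned)TLP_CFG_DW0(&pcie, 0)); /* 0x04000001 */
        printf("bus 1: 0x%08x\n", (unsigned)TLP_CFG_DW0(&pcie, 1)); /* 0x05000001 */
        return 0;
}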

@ -29,34 +29,33 @@
#include "pcie-designware.h"
struct armada8k_pcie {
void __iomem *base;
struct pcie_port pp; /* pp.dbi_base is DT ctrl */
struct clk *clk;
struct pcie_port pp;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
#define PCIE_GLOBAL_CONTROL_REG 0x0
#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT 4
#define PCIE_DEVICE_TYPE_MASK 0xF
#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
#define PCIE_GLOBAL_STATUS_REG 0x8
#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
#define PCIE_GLOBAL_INT_MASK1_REG 0x20
#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK BIT(9)
#define PCIE_INT_B_ASSERT_MASK BIT(10)
#define PCIE_INT_C_ASSERT_MASK BIT(11)
#define PCIE_INT_D_ASSERT_MASK BIT(12)
#define PCIE_ARCACHE_TRC_REG 0x50
#define PCIE_AWCACHE_TRC_REG 0x54
#define PCIE_ARUSER_REG 0x5C
#define PCIE_AWUSER_REG 0x60
#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
* AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
@ -72,11 +71,10 @@ struct armada8k_pcie {
static int armada8k_pcie_link_up(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
return 1;
@ -85,51 +83,50 @@ static int armada8k_pcie_link_up(struct pcie_port *pp)
return 0;
}
static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
struct pcie_port *pp = &pcie->pp;
u32 reg;
if (!dw_pcie_link_up(pp)) {
/* Disable LTSSM state machine to enable configuration */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_APP_LTSSM_EN);
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Set the device to root complex mode */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
/* Set the PCIe master AxCache attributes */
writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
/* Set the PCIe master AxDomain attributes */
reg = readl(base + PCIE_ARUSER_REG);
reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_ARUSER_REG);
dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg);
reg = readl(base + PCIE_AWUSER_REG);
reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_AWUSER_REG);
dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg);
/* Enable INT A-D interrupts */
reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG);
reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg);
if (!dw_pcie_link_up(pp)) {
/* Configuration done. Start LTSSM */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg |= PCIE_APP_LTSSM_EN;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Wait until the link becomes active again */
@ -139,15 +136,16 @@ static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_host_init(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pp);
armada8k_pcie_establish_link(pcie);
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
struct armada8k_pcie *pcie = arg;
struct pcie_port *pp = &pcie->pp;
u32 val;
/*
@ -155,8 +153,8 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
* PCI device. However, they are also latched into the PCIe
* controller, so we simply discard them.
*/
val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val);
return IRQ_HANDLED;
}
@ -166,9 +164,10 @@ static struct pcie_host_ops armada8k_pcie_host_ops = {
.host_init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct pcie_port *pp,
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &pcie->pp;
struct device *dev = &pdev->dev;
int ret;
@ -182,7 +181,7 @@ static int armada8k_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
IRQF_SHARED, "armada8k-pcie", pp);
IRQF_SHARED, "armada8k-pcie", pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@ -217,7 +216,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
platform_set_drvdata(pdev, pcie);
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
@ -228,9 +226,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail;
}
pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
ret = armada8k_add_pcie_port(pp, pdev);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
goto fail;

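Folding PCIE_VENDOR_REGS_OFFSET into each register macro is what lets the dbi-relative dw_pcie_readl_rc()/dw_pcie_writel_rc() replace the private base pointer. A tiny user-space check, under the assumption that the old pcie->base was dbi_base plus the vendor offset, that the two addressing schemes hit the same register:

#include <stdint.h>
#include <stdio.h>

#define PCIE_VENDOR_REGS_OFFSET         0x8000
#define PCIE_GLOBAL_STATUS_REG          (PCIE_VENDOR_REGS_OFFSET + 0x8)

int main(void)
{
        static uint8_t dbi[0x9000];             /* stand-in for pp->dbi_base */
        uint8_t *old_base = dbi + PCIE_VENDOR_REGS_OFFSET; /* old pcie->base */

        /* old: old_base + 0x8;  new: dbi + PCIE_GLOBAL_STATUS_REG */
        printf("%s\n", (old_base + 0x8 == dbi + PCIE_GLOBAL_STATUS_REG)
               ? "same register" : "mismatch");
        return 0;
}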

@ -27,9 +27,9 @@
#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
struct artpec6_pcie {
struct pcie_port pp;
struct regmap *regmap;
void __iomem *phy_base;
struct pcie_port pp; /* pp.dbi_base is DT dbi */
struct regmap *regmap; /* DT axis,syscon-pcie */
void __iomem *phy_base; /* DT phy */
};
/* PCIe Port Logic registers (memory-mapped) */
@ -65,18 +65,31 @@ struct artpec6_pcie {
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
static int artpec6_pcie_establish_link(struct pcie_port *pp)
static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
u32 val;
regmap_read(artpec6_pcie->regmap, offset, &val);
return val;
}
static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
{
regmap_write(artpec6_pcie->regmap, offset, val);
}
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
{
struct pcie_port *pp = &artpec6_pcie->pp;
u32 val;
unsigned int retries;
/* Hold DW core in reset */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_CORE_RESET_REQ;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
PCIECFG_MODE_TX_DRV_EN |
PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
@ -84,27 +97,27 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
val |= PCIECFG_REFCLK_ENABLE;
val &= ~PCIECFG_DBG_OEN;
val &= ~PCIECFG_CLKREQ_B;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(5000, 6000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val |= NOCCFG_ENABLE_CLK_PCIE;
regmap_write(artpec6_pcie->regmap, NOCCFG, val);
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
usleep_range(20, 30);
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(6000, 7000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
regmap_write(artpec6_pcie->regmap, NOCCFG, val);
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
retries = 50;
do {
usleep_range(1000, 2000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
@ -117,16 +130,16 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
} while (retries && !(val & PHY_COSPLLLOCK));
/* Take DW core out of reset */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_CORE_RESET_REQ;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
/*
* Enable writing to config regs. This is required as the Synopsys
* driver changes the class code. That register needs DBI write enable.
*/
writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@ -137,78 +150,69 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
{
struct pcie_port *pp = &artpec6_pcie->pp;
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
}
static void artpec6_pcie_host_init(struct pcie_port *pp)
{
artpec6_pcie_establish_link(pp);
artpec6_pcie_enable_interrupts(pp);
}
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
static int artpec6_pcie_link_up(struct pcie_port *pp)
{
u32 rc;
/*
* Get status from Synopsys IP
* link is debug bit 36, debug register 1 starts at bit 32
*/
rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
if (rc)
return 1;
return 0;
artpec6_pcie_establish_link(artpec6_pcie);
artpec6_pcie_enable_interrupts(artpec6_pcie);
}
static struct pcie_host_ops artpec6_pcie_host_ops = {
.link_up = artpec6_pcie_link_up,
.host_init = artpec6_pcie_host_init,
};
static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct artpec6_pcie *artpec6_pcie = arg;
struct pcie_port *pp = &artpec6_pcie->pp;
return dw_handle_msi_irq(pp);
}
static int artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &artpec6_pcie->pp;
struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
artpec6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"artpec6-pcie-msi", pp);
"artpec6-pcie-msi", artpec6_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@ -218,7 +222,7 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -227,41 +231,40 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct artpec6_pcie *artpec6_pcie;
struct pcie_port *pp;
struct resource *dbi_base;
struct resource *phy_base;
int ret;
artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
GFP_KERNEL);
artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
if (!artpec6_pcie)
return -ENOMEM;
pp = &artpec6_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
artpec6_pcie->regmap =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
syscon_regmap_lookup_by_phandle(dev->of_node,
"axis,syscon-pcie");
if (IS_ERR(artpec6_pcie->regmap))
return PTR_ERR(artpec6_pcie->regmap);
ret = artpec6_add_pcie_port(pp, pdev);
ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, artpec6_pcie);
return 0;
}

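Since regmap_read() returns a status and fills a value through a pointer, the new artpec6_pcie_readl() wrapper is what keeps the long read-modify-write ladders above readable. A hypothetical follow-on helper, not in the patch, shows where the accessors could lead:

static void artpec6_pcie_update(struct artpec6_pcie *artpec6_pcie,
                                u32 offset, u32 clear, u32 set)
{
        u32 val = artpec6_pcie_readl(artpec6_pcie, offset);

        val &= ~clear;
        val |= set;
        artpec6_pcie_writel(artpec6_pcie, offset, val);
}

/* e.g. the LTSSM-enable sequence would collapse to:
 *      artpec6_pcie_update(artpec6_pcie, PCIECFG, 0, PCIECFG_LTSSM_ENABLE);
 */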

@ -25,8 +25,7 @@
#include "pcie-designware.h"
struct dw_plat_pcie {
void __iomem *mem_base;
struct pcie_port pp;
struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
};
static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@ -52,6 +51,7 @@ static struct pcie_host_ops dw_plat_pcie_host_ops = {
static int dw_plat_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
@ -63,11 +63,11 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
if (pp->msi_irq < 0)
return pp->msi_irq;
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
IRQF_SHARED, "dw-plat-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI IRQ\n");
dev_err(dev, "failed to request MSI IRQ\n");
return ret;
}
}
@ -77,7 +77,7 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -86,31 +86,28 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct pcie_port *pp;
struct resource *res; /* Resource from DT */
int ret;
dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
GFP_KERNEL);
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
return -ENOMEM;
pp = &dw_plat_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dw_plat_pcie->mem_base))
return PTR_ERR(dw_plat_pcie->mem_base);
pp->dbi_base = dw_plat_pcie->mem_base;
pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
ret = dw_plat_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, dw_plat_pcie);
return 0;
}


@ -141,41 +141,35 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, pp->dbi_base + reg);
return pp->ops->readl_rc(pp, reg);
return readl(pp->dbi_base + reg);
}
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
pp->ops->writel_rc(pp, reg, val);
else
writel(val, pp->dbi_base + reg);
}
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
return readl(pp->dbi_base + offset + reg);
return dw_pcie_readl_rc(pp, offset + reg);
}
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
u32 val, u32 reg)
static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
else
writel(val, pp->dbi_base + offset + reg);
dw_pcie_writel_rc(pp, offset + reg, val);
}
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@ -202,35 +196,35 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
u32 retries, val;
if (pp->iatu_unroll_enabled) {
dw_pcie_writel_unroll(pp, index,
lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
dw_pcie_writel_unroll(pp, index,
upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
dw_pcie_writel_unroll(pp, index,
lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
dw_pcie_writel_unroll(pp, index,
lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
dw_pcie_writel_unroll(pp, index,
upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
dw_pcie_writel_unroll(pp, index,
type, PCIE_ATU_UNR_REGION_CTRL1);
dw_pcie_writel_unroll(pp, index,
PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
type);
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
} else {
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
PCIE_ATU_REGION_OUTBOUND | index);
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
}
/*
@ -760,8 +754,8 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
return ret;
}
static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
@ -781,7 +775,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct pcie_port *pp = bus->sysdata;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@ -797,7 +791,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct pcie_port *pp = bus->sysdata;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
@ -835,7 +829,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
return;
}
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
/* set link width speed control register */
val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
@ -854,30 +848,30 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
}
dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
/* setup RC BARs */
dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, val, PCI_COMMAND);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform

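With dw_pcie_readl_rc()/dw_pcie_writel_rc() now public and taking (pp, reg) and (pp, reg, val), a platform override receives a register offset rather than a precomputed address, so the core owns the dbi_base arithmetic. A sketch of a driver-side override under the new prototypes; the foo_* names are hypothetical:

#include <linux/io.h>

#include "pcie-designware.h"

static u32 foo_readl_rc(struct pcie_port *pp, u32 reg)
{
        /* room for controller-specific remapping before the access */
        return readl(pp->dbi_base + reg);
}

static void foo_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
        writel(val, pp->dbi_base + reg);
}

static struct pcie_host_ops foo_pcie_host_ops = {
        .readl_rc = foo_readl_rc,
        .writel_rc = foo_writel_rc,
};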

@ -54,9 +54,8 @@ struct pcie_port {
};
struct pcie_host_ops {
u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
void (*writel_rc)(struct pcie_port *pp,
u32 val, void __iomem *dbi_base);
u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@ -73,6 +72,8 @@ struct pcie_host_ops {
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);


@ -22,51 +22,38 @@
#include "pcie-designware.h"
#define PCIE_LTSSM_LINKUP_STATE 0x11
#define PCIE_LTSSM_STATE_MASK 0x3F
#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
#define PCIE_SYS_STATE4 0x31c
#define PCIE_HIP06_CTRL_OFF 0x1000
#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
#define PCIE_HIP06_CTRL_OFF 0x1000
#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
#define PCIE_LTSSM_LINKUP_STATE 0x11
#define PCIE_LTSSM_STATE_MASK 0x3F
#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
struct hisi_pcie;
struct pcie_soc_ops {
int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
};
struct hisi_pcie {
struct pcie_port pp; /* pp.dbi_base is DT rc_dbi */
struct regmap *subctrl;
void __iomem *reg_base;
u32 port_id;
struct pcie_port pp;
struct pcie_soc_ops *soc_ops;
};
static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
u32 val, u32 reg)
{
writel(val, pcie->reg_base + reg);
}
static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
{
return readl(pcie->reg_base + reg);
}
/* HipXX PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
u32 *val)
{
u32 reg;
u32 reg_val;
struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
if (size == 1)
*val = *(u8 __force *) walker;
@ -86,21 +73,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
{
u32 reg_val;
u32 reg;
struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
if (size == 4)
hisi_pcie_apb_writel(pcie, val, reg);
dw_pcie_writel_rc(pp, reg, val);
else if (size == 2) {
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
*(u16 __force *) walker = val;
hisi_pcie_apb_writel(pcie, reg_val, reg);
dw_pcie_writel_rc(pp, reg, reg_val);
} else if (size == 1) {
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
*(u8 __force *) walker = val;
hisi_pcie_apb_writel(pcie, reg_val, reg);
dw_pcie_writel_rc(pp, reg, reg_val);
} else
return PCIBIOS_BAD_REGISTER_NUMBER;
@ -119,10 +105,10 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
{
struct pcie_port *pp = &hisi_pcie->pp;
u32 val;
val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
PCIE_SYS_STATE4);
val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
}
@ -140,19 +126,20 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
.link_up = hisi_pcie_link_up,
};
static int hisi_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &hisi_pcie->pp;
struct device *dev = pp->dev;
int ret;
u32 port_id;
struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
dev_err(&pdev->dev, "failed to read port-id\n");
if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
dev_err(dev, "failed to read port-id\n");
return -EINVAL;
}
if (port_id > 3) {
dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
dev_err(dev, "Invalid port-id: %d\n", port_id);
return -EINVAL;
}
hisi_pcie->port_id = port_id;
@ -161,7 +148,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -170,6 +157,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
static int hisi_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_pcie *hisi_pcie;
struct pcie_port *pp;
const struct of_device_id *match;
@ -177,40 +165,36 @@ static int hisi_pcie_probe(struct platform_device *pdev)
struct device_driver *driver;
int ret;
hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
if (!hisi_pcie)
return -ENOMEM;
pp = &hisi_pcie->pp;
pp->dev = &pdev->dev;
driver = (pdev->dev).driver;
pp->dev = dev;
driver = dev->driver;
match = of_match_device(driver->of_match_table, &pdev->dev);
match = of_match_device(driver->of_match_table, dev);
hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
hisi_pcie->subctrl =
syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
if (IS_ERR(hisi_pcie->subctrl)) {
dev_err(pp->dev, "cannot get subctrl base\n");
dev_err(dev, "cannot get subctrl base\n");
return PTR_ERR(hisi_pcie->subctrl);
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(hisi_pcie->reg_base)) {
dev_err(pp->dev, "cannot get rc_dbi base\n");
return PTR_ERR(hisi_pcie->reg_base);
pp->dbi_base = devm_ioremap_resource(dev, reg);
if (IS_ERR(pp->dbi_base)) {
dev_err(dev, "cannot get rc_dbi base\n");
return PTR_ERR(pp->dbi_base);
}
hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
ret = hisi_add_pcie_port(pp, pdev);
ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
platform_set_drvdata(pdev, hisi_pcie);
dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
return 0;
}

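The walker arithmetic in the HiSilicon config accessors is worth seeing in isolation: the hardware only does aligned 32-bit accesses, so a byte or half-word write becomes a read-modify-write of the containing dword, which is exactly why the probe path now warns about adjacent RW1C fields. A runnable user-space model, with the register file faked by an array:

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg_space[64];          /* fake 32-bit-only register file */

static uint32_t cfg_readl(uint32_t reg) { return cfg_space[reg / 4]; }
static void cfg_writel(uint32_t reg, uint32_t val) { cfg_space[reg / 4] = val; }

static void cfg_write(int where, int size, uint32_t val)
{
        uint32_t reg = where & ~0x3;
        uint32_t reg_val = cfg_readl(reg);
        uint8_t *walker = (uint8_t *)&reg_val + (where & 0x3);

        if (size == 4) {
                cfg_writel(reg, val);
                return;
        }
        if (size == 2)
                *(uint16_t *)walker = (uint16_t)val;    /* patch half-word */
        else
                *walker = (uint8_t)val;                 /* patch one byte */
        cfg_writel(reg, reg_val);       /* whole dword written back */
}

int main(void)
{
        cfg_writel(0x10, 0xaabbccdd);
        cfg_write(0x11, 1, 0xee);       /* byte write at offset 1 */
        printf("0x%08x\n", cfg_readl(0x10));    /* 0xaabbeedd on little-endian */
        return 0;
}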

@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(res);
struct resource res_mem;
int ret;
pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &bdev->dev;
bcma_set_drvdata(bdev, pcie);
pcie->dev = dev;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
return -ENOMEM;
}
pcie->base_addr = bdev->addr;
res_mem.start = bdev->addr_s[0];
@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
bcma_set_drvdata(bdev, pcie);
return ret;
}


@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct iproc_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(res);
int ret;
of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
of_id = of_match_device(iproc_pcie_of_match_table, dev);
if (!of_id)
return -EINVAL;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
pcie->dev = dev;
pcie->type = (enum iproc_pcie_type)of_id->data;
platform_set_drvdata(pdev, pcie);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
dev_err(pcie->dev, "unable to obtain controller resources\n");
dev_err(dev, "unable to obtain controller resources\n");
return ret;
}
pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
if (!pcie->base) {
dev_err(pcie->dev, "unable to map controller registers\n");
dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
}
pcie->base_addr = reg.start;
@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-axi-offset property\n");
return ret;
}
@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-window-size property\n");
return ret;
}
@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
}
/* PHY use is optional */
pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"unable to get PCI host bridge resources\n");
return ret;
}
@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
platform_set_drvdata(pdev, pcie);
return ret;
}


@ -63,6 +63,8 @@
#define OARR_SIZE_CFG_SHIFT 1
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
#define PCI_EXP_CAP 0xac
#define MAX_NUM_OB_WINDOWS 2
#define IPROC_PCIE_REG_INVALID 0xffff
@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
struct device *dev = pcie->dev;
u8 hdr_type;
u32 link_ctrl, class, val;
u16 pos, link_status;
u16 pos = PCI_EXP_CAP, link_status;
bool link_is_active = false;
/*
@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
dev_err(dev, "PHY or data link is INACTIVE!\n");
return -ENODEV;
}
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
pci_bus_read_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
pci_bus_write_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
link_ctrl);
msleep(100);
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
}
dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
return link_is_active ? 0 : -ENODEV;
}
@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
u64 pci_addr, resource_size_t size)
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
unsigned i;
u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
u64 remainder;
if (size > max_size) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap exceeds max supported size 0x%llx\n",
&size, max_size);
return -EINVAL;
@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
div64_u64_rem(size, ob->window_size, &remainder);
if (remainder) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap needs to be multiple of window size %pap\n",
&size, &ob->window_size);
return -EINVAL;
}
if (axi_addr < ob->axi_offset) {
dev_err(pcie->dev,
"axi address %pap less than offset %pap\n",
dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset);
return -EINVAL;
}
@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
struct list_head *resources)
{
struct device *dev = pcie->dev;
struct resource_entry *window;
int ret;
@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return ret;
break;
default:
dev_err(pcie->dev, "invalid resource %pR\n", res);
dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
int ret;
void *sysdata;
struct pci_bus *bus;
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
ret = devm_request_pci_bus_resources(pcie->dev, res);
dev = pcie->dev;
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
dev_err(dev, "unable to initialize PCIe PHY\n");
return ret;
}
ret = phy_power_on(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to power on PCIe PHY\n");
dev_err(dev, "unable to power on PCIe PHY\n");
goto err_exit_phy;
}
@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pcie->reg_offsets = iproc_pcie_reg_paxc;
break;
default:
dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
dev_err(dev, "incompatible iProc PCIe interface\n");
ret = -EINVAL;
goto err_power_off_phy;
}
@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
dev_err(pcie->dev, "map failed\n");
dev_err(dev, "map failed\n");
goto err_power_off_phy;
}
}
@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
dev_err(pcie->dev, "unable to create PCI root bus\n");
dev_err(dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
goto err_power_off_phy;
}
@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
ret = iproc_pcie_check_link(pcie, bus);
if (ret) {
dev_err(pcie->dev, "no PCIe EP device detected\n");
dev_err(dev, "no PCIe EP device detected\n");
goto err_rm_root_bus;
}
@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (IS_ENABLED(CONFIG_PCI_MSI))
if (iproc_pcie_msi_enable(pcie))
dev_info(pcie->dev, "not using iProc MSI\n");
dev_info(dev, "not using iProc MSI\n");
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);

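Because this controller always places its PCIe capability at 0xac, the per-call pci_bus_find_capability() lookup above was replaced by the PCI_EXP_CAP constant. A sketch of the resulting access pattern; the foo_* name is hypothetical and the register symbols are the standard ones from pci_regs.h:

#include <linux/pci.h>

#define PCI_EXP_CAP     0xac            /* fixed offset on this controller */

static bool foo_link_has_width(struct pci_bus *bus)
{
        u16 pos = PCI_EXP_CAP, link_status;

        pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
        return link_status & PCI_EXP_LNKSTA_NLW;
}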

@@ -86,12 +86,10 @@ struct qcom_pcie_ops {
 };
 
 struct qcom_pcie {
-	struct pcie_port pp;
-	struct device *dev;
+	struct pcie_port pp;		/* pp.dbi_base is DT dbi */
+	void __iomem *parf;		/* DT parf */
+	void __iomem *elbi;		/* DT elbi */
 	union qcom_pcie_resources res;
-	void __iomem *parf;
-	void __iomem *dbi;
-	void __iomem *elbi;
 	struct phy *phy;
 	struct gpio_desc *reset;
 	struct qcom_pcie_ops *ops;
@@ -136,7 +134,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
 static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -188,7 +186,7 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -237,7 +235,7 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	u32 val;
 	int ret;
@@ -359,7 +357,7 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
 static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	int ret;
 
 	ret = reset_control_deassert(res->core);
@@ -426,7 +424,7 @@ static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
 static int qcom_pcie_link_up(struct pcie_port *pp)
 {
 	struct qcom_pcie *pcie = to_qcom_pcie(pp);
-	u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+	u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
 
 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
@@ -509,8 +507,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (!pcie)
 		return -ENOMEM;
 
+	pp = &pcie->pp;
 	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
-	pcie->dev = dev;
 
 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
 	if (IS_ERR(pcie->reset))
@@ -522,9 +520,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(pcie->parf);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pcie->dbi = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->dbi))
-		return PTR_ERR(pcie->dbi);
+	pp->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
 	pcie->elbi = devm_ioremap_resource(dev, res);
@@ -539,9 +537,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	pp = &pcie->pp;
 	pp->dev = dev;
-	pp->dbi_base = pcie->dbi;
 	pp->root_bus_nr = -1;
 	pp->ops = &qcom_pcie_dw_ops;
@@ -569,8 +565,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, pcie);
-
 	return 0;
 }
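
The qcom diff drops the driver's private "dev" and "dbi" copies and leans on the pcie_port fields the DesignWare core already maintains. A sketch of the shape this leaves behind, with hypothetical names (foo_pcie, LINK_CAP_OFFSET) standing in for the real driver's, and assuming the v4.9-era pcie_port with dev/dbi_base members:

	struct foo_pcie {
		struct pcie_port pp;	/* pp.dev and pp.dbi_base are canonical */
		void __iomem *elbi;	/* only genuinely device-specific state */
	};

	#define LINK_CAP_OFFSET	0x70	/* assumed PCIe capability offset */

	static int foo_pcie_link_up(struct pcie_port *pp)
	{
		/* read link status through the shared dbi mapping,
		 * no private copy of the pointer needed
		 */
		u16 val = readw(pp->dbi_base + LINK_CAP_OFFSET + PCI_EXP_LNKSTA);

		return !!(val & PCI_EXP_LNKSTA_DLLLA);
	}

Keeping one canonical copy of each pointer means a future change to how dbi is mapped touches exactly one place.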


@@ -31,8 +31,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
-#define DRV_NAME "rcar-pcie"
-
 #define PCIECAR			0x000010
 #define PCIECCTLR		0x000018
 #define  CONFIG_SEND_ENABLE	(1 << 31)
@@ -397,6 +395,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
 static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 1000;
 	u32 macsr;
@@ -404,7 +403,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 		return;
 
 	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
-		dev_err(pcie->dev, "Speed change already in progress\n");
+		dev_err(dev, "Speed change already in progress\n");
 		return;
 	}
@@ -433,7 +432,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 			rcar_pci_write_reg(pcie, macsr, MACSR);
 
 			if (macsr & SPCHGFAIL)
-				dev_err(pcie->dev, "Speed change failed\n");
+				dev_err(dev, "Speed change failed\n");
 
 			goto done;
 		}
@@ -441,15 +440,16 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 		msleep(1);
 	};
 
-	dev_err(pcie->dev, "Speed change timed out\n");
+	dev_err(dev, "Speed change timed out\n");
 
 done:
-	dev_info(pcie->dev, "Current link speed is %s GT/s\n",
+	dev_info(dev, "Current link speed is %s GT/s\n",
 		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 }
 
 static int rcar_pcie_enable(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct pci_bus *bus, *child;
 	LIST_HEAD(res);
@@ -461,14 +461,14 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
 	else
-		bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res);
 
 	if (!bus) {
-		dev_err(pcie->dev, "Scanning rootbus failed");
+		dev_err(dev, "Scanning rootbus failed");
 		return -ENODEV;
 	}
@@ -487,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 static int phy_wait_for_ack(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 100;
 
 	while (timeout--) {
@@ -496,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
 		udelay(100);
 	}
 
-	dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+	dev_err(dev, "Access to PCIe phy timed out\n");
 
 	return -ETIMEDOUT;
 }
@@ -697,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 {
 	struct rcar_pcie *pcie = data;
 	struct rcar_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
 	unsigned long reg;
 
 	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@@ -717,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 			if (test_bit(index, msi->used))
 				generic_handle_irq(irq);
 			else
-				dev_info(pcie->dev, "unhandled MSI\n");
+				dev_info(dev, "unhandled MSI\n");
 		} else {
 			/* Unknown MSI, just clear it */
-			dev_dbg(pcie->dev, "unexpected MSI\n");
+			dev_dbg(dev, "unexpected MSI\n");
 		}
 
 		/* see if there's any more pending in this vector */
@@ -843,22 +845,22 @@ static const struct irq_domain_ops msi_domain_ops = {
 static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
 	struct rcar_msi *msi = &pcie->msi;
 	unsigned long base;
 	int err, i;
 
 	mutex_init(&msi->lock);
 
-	msi->chip.dev = pcie->dev;
+	msi->chip.dev = dev;
 	msi->chip.setup_irq = rcar_msi_setup_irq;
 	msi->chip.setup_irqs = rcar_msi_setup_irqs;
 	msi->chip.teardown_irq = rcar_msi_teardown_irq;
 
-	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 					    &msi_domain_ops, &msi->chip);
 	if (!msi->domain) {
-		dev_err(&pdev->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
@@ -866,19 +868,19 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 		irq_create_mapping(msi->domain, i);
 
 	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
-	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
-	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
@@ -899,32 +901,32 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 	return err;
 }
 
-static int rcar_pcie_get_resources(struct platform_device *pdev,
-				   struct rcar_pcie *pcie)
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct resource res;
 	int err, i;
 
-	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+	err = of_address_to_resource(dev->of_node, 0, &res);
 	if (err)
 		return err;
 
-	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+	pcie->base = devm_ioremap_resource(dev, &res);
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+	pcie->clk = devm_clk_get(dev, "pcie");
 	if (IS_ERR(pcie->clk)) {
-		dev_err(pcie->dev, "cannot get platform clock\n");
+		dev_err(dev, "cannot get platform clock\n");
 		return PTR_ERR(pcie->clk);
 	}
 	err = clk_prepare_enable(pcie->clk);
 	if (err)
 		return err;
 
-	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(pcie->bus_clk)) {
-		dev_err(pcie->dev, "cannot get pcie bus clock\n");
+		dev_err(dev, "cannot get pcie bus clock\n");
 		err = PTR_ERR(pcie->bus_clk);
 		goto fail_clk;
 	}
@@ -932,17 +934,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 	if (err)
 		goto fail_clk;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	i = irq_of_parse_and_map(dev->of_node, 0);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
 	pcie->msi.irq1 = i;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	i = irq_of_parse_and_map(dev->of_node, 1);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
@@ -1119,60 +1121,60 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
 static int rcar_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct rcar_pcie *pcie;
 	unsigned int data;
 	const struct of_device_id *of_id;
 	int err;
 	int (*hw_init_fn)(struct rcar_pcie *);
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
-	platform_set_drvdata(pdev, pcie);
+	pcie->dev = dev;
 
 	INIT_LIST_HEAD(&pcie->resources);
 
 	rcar_pcie_parse_request_of_pci_ranges(pcie);
 
-	err = rcar_pcie_get_resources(pdev, pcie);
+	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+		dev_err(dev, "failed to request resources: %d\n", err);
 		return err;
 	}
 
-	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
 		return err;
 
-	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+	of_id = of_match_device(rcar_pcie_of_match, dev);
 	if (!of_id || !of_id->data)
 		return -EINVAL;
 	hw_init_fn = of_id->data;
 
-	pm_runtime_enable(pcie->dev);
-	err = pm_runtime_get_sync(pcie->dev);
+	pm_runtime_enable(dev);
+	err = pm_runtime_get_sync(dev);
 	if (err < 0) {
-		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+		dev_err(dev, "pm_runtime_get_sync failed\n");
 		goto err_pm_disable;
 	}
 
 	/* Failure to get a link might just be that no cards are inserted */
 	err = hw_init_fn(pcie);
 	if (err) {
-		dev_info(&pdev->dev, "PCIe link down\n");
+		dev_info(dev, "PCIe link down\n");
 		err = 0;
 		goto err_pm_put;
 	}
 
 	data = rcar_pci_read_reg(pcie, MACSR);
-	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = rcar_pcie_enable_msi(pcie);
 		if (err < 0) {
-			dev_err(&pdev->dev,
+			dev_err(dev,
 				"failed to enable MSI support: %d\n",
 				err);
 			goto err_pm_put;
@@ -1186,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	return 0;
 
 err_pm_put:
-	pm_runtime_put(pcie->dev);
+	pm_runtime_put(dev);
 
 err_pm_disable:
-	pm_runtime_disable(pcie->dev);
+	pm_runtime_disable(dev);
 	return err;
 }
 
 static struct platform_driver rcar_pcie_driver = {
 	.driver = {
-		.name = DRV_NAME,
+		.name = "rcar-pcie",
 		.of_match_table = rcar_pcie_of_match,
 		.suppress_bind_attrs = true,
 	},
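
Note how rcar_pcie_get_resources() loses its platform_device argument: once pcie->dev is cached at probe time, both the pdev and the of_node can be recovered from it on demand, so there is no reason to thread two pointers through the call chain. A sketch of that idiom under illustrative names (foo_pcie is hypothetical):

	struct foo_pcie {
		struct device *dev;	/* set to &pdev->dev in probe */
		int irq;
	};

	static int foo_pcie_get_resources(struct foo_pcie *pcie)
	{
		struct device *dev = pcie->dev;
		struct platform_device *pdev = to_platform_device(dev);
		int irq;

		/* pdev is recovered only where it is actually needed */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "failed to get IRQ: %d\n", irq);
			return irq;
		}

		pcie->irq = irq;
		return 0;
	}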


@@ -972,7 +972,7 @@ static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
 		return -EINVAL;
 
 	if (region_no == 0) {
 		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
-				return -EINVAL;
+			return -EINVAL;
 	}
 
 	if (region_no != 0) {
 		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
@@ -1091,8 +1091,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto err_vpcie;
 
-	platform_set_drvdata(pdev, rockchip);
-
 	rockchip_pcie_enable_interrupts(rockchip);
 
 	err = rockchip_pcie_init_irq_domain(rockchip);


@@ -25,10 +25,10 @@
 #include "pcie-designware.h"
 
 struct spear13xx_pcie {
+	struct pcie_port	pp;		/* DT dbi is pp.dbi_base */
 	void __iomem		*app_base;
 	struct phy		*phy;
 	struct clk		*clk;
-	struct pcie_port	pp;
 	bool			is_gen1;
 };
@@ -57,96 +57,26 @@
 };
 
 /* CR0 ID */
-#define RX_LANE_FLIP_EN_ID			0
-#define TX_LANE_FLIP_EN_ID			1
-#define SYS_AUX_PWR_DET_ID			2
 #define APP_LTSSM_ENABLE_ID			3
-#define SYS_ATTEN_BUTTON_PRESSED_ID		4
-#define SYS_MRL_SENSOR_STATE_ID			5
-#define SYS_PWR_FAULT_DET_ID			6
-#define SYS_MRL_SENSOR_CHGED_ID			7
-#define SYS_PRE_DET_CHGED_ID			8
-#define SYS_CMD_CPLED_INT_ID			9
-#define APP_INIT_RST_0_ID			11
-#define APP_REQ_ENTR_L1_ID			12
-#define APP_READY_ENTR_L23_ID			13
-#define APP_REQ_EXIT_L1_ID			14
-#define DEVICE_TYPE_EP				(0 << 25)
-#define DEVICE_TYPE_LEP				(1 << 25)
 #define DEVICE_TYPE_RC				(4 << 25)
-#define SYS_INT_ID				29
 #define MISCTRL_EN_ID				30
 #define REG_TRANSLATION_ENABLE			31
 
-/* CR1 ID */
-#define APPS_PM_XMT_TURNOFF_ID			2
-#define APPS_PM_XMT_PME_ID			5
-
 /* CR3 ID */
-#define XMLH_LTSSM_STATE_DETECT_QUIET		0x00
-#define XMLH_LTSSM_STATE_DETECT_ACT		0x01
-#define XMLH_LTSSM_STATE_POLL_ACTIVE		0x02
-#define XMLH_LTSSM_STATE_POLL_COMPLIANCE	0x03
-#define XMLH_LTSSM_STATE_POLL_CONFIG		0x04
-#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET	0x05
-#define XMLH_LTSSM_STATE_DETECT_WAIT		0x06
-#define XMLH_LTSSM_STATE_CFG_LINKWD_START	0x07
-#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT	0x08
-#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT	0x09
-#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT	0x0A
-#define XMLH_LTSSM_STATE_CFG_COMPLETE		0x0B
-#define XMLH_LTSSM_STATE_CFG_IDLE		0x0C
-#define XMLH_LTSSM_STATE_RCVRY_LOCK		0x0D
-#define XMLH_LTSSM_STATE_RCVRY_SPEED		0x0E
-#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG		0x0F
-#define XMLH_LTSSM_STATE_RCVRY_IDLE		0x10
-#define XMLH_LTSSM_STATE_L0			0x11
-#define XMLH_LTSSM_STATE_L0S			0x12
-#define XMLH_LTSSM_STATE_L123_SEND_EIDLE	0x13
-#define XMLH_LTSSM_STATE_L1_IDLE		0x14
-#define XMLH_LTSSM_STATE_L2_IDLE		0x15
-#define XMLH_LTSSM_STATE_L2_WAKE		0x16
-#define XMLH_LTSSM_STATE_DISABLED_ENTRY		0x17
-#define XMLH_LTSSM_STATE_DISABLED_IDLE		0x18
-#define XMLH_LTSSM_STATE_DISABLED		0x19
-#define XMLH_LTSSM_STATE_LPBK_ENTRY		0x1A
-#define XMLH_LTSSM_STATE_LPBK_ACTIVE		0x1B
-#define XMLH_LTSSM_STATE_LPBK_EXIT		0x1C
-#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT	0x1D
-#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY	0x1E
-#define XMLH_LTSSM_STATE_HOT_RESET		0x1F
-#define XMLH_LTSSM_STATE_MASK			0x3F
 #define XMLH_LINK_UP				(1 << 6)
 
-/* CR4 ID */
-#define CFG_MSI_EN_ID				18
-
 /* CR6 */
-#define INTA_CTRL_INT				(1 << 7)
-#define INTB_CTRL_INT				(1 << 8)
-#define INTC_CTRL_INT				(1 << 9)
-#define INTD_CTRL_INT				(1 << 10)
 #define MSI_CTRL_INT				(1 << 26)
 
-/* CR19 ID */
-#define VEN_MSI_REQ_ID				11
-#define VEN_MSI_FUN_NUM_ID			8
-#define VEN_MSI_TC_ID				5
-#define VEN_MSI_VECTOR_ID			0
-#define VEN_MSI_REQ_EN		((u32)0x1 << VEN_MSI_REQ_ID)
-#define VEN_MSI_FUN_NUM_MASK	((u32)0x7 << VEN_MSI_FUN_NUM_ID)
-#define VEN_MSI_TC_MASK		((u32)0x7 << VEN_MSI_TC_ID)
-#define VEN_MSI_VECTOR_MASK	((u32)0x1F << VEN_MSI_VECTOR_ID)
-
 #define EXP_CAP_ID_OFFSET			0x70
 
 #define to_spear13xx_pcie(x)	container_of(x, struct spear13xx_pcie, pp)
 
-static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
 {
-	u32 val;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
 
 	if (dw_pcie_link_up(pp)) {
@@ -203,9 +133,9 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
 static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct spear13xx_pcie *spear13xx_pcie = arg;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	unsigned int status;
 
 	status = readl(&app_reg->int_sts);
@@ -220,9 +150,9 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
 {
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
 
 	/* Enable MSI interrupt */
@@ -246,8 +176,10 @@ static int spear13xx_pcie_link_up(struct pcie_port *pp)
 static void spear13xx_pcie_host_init(struct pcie_port *pp)
 {
-	spear13xx_pcie_establish_link(pp);
-	spear13xx_pcie_enable_interrupts(pp);
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+
+	spear13xx_pcie_establish_link(spear13xx_pcie);
+	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
 }
 
 static struct pcie_host_ops spear13xx_pcie_host_ops = {
@@ -255,10 +187,11 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
 	.host_init = spear13xx_pcie_host_init,
 };
 
-static int spear13xx_add_pcie_port(struct pcie_port *pp,
-				   struct platform_device *pdev)
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+				   struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 0);
@@ -268,7 +201,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 	}
 
 	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
 			       IRQF_SHARED | IRQF_NO_THREAD,
-			       "spear1340-pcie", pp);
+			       "spear1340-pcie", spear13xx_pcie);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", pp->irq);
 		return ret;
@@ -288,10 +221,10 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 static int spear13xx_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct spear13xx_pcie *spear13xx_pcie;
 	struct pcie_port *pp;
-	struct device *dev = &pdev->dev;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource *dbi_base;
 	int ret;
@@ -323,7 +256,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
 	}
 
 	pp = &spear13xx_pcie->pp;
-
 	pp->dev = dev;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
@@ -338,7 +270,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
 	if (of_property_read_bool(np, "st,pcie-is-gen1"))
 		spear13xx_pcie->is_gen1 = true;
 
-	ret = spear13xx_add_pcie_port(pp, pdev);
+	ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
 	if (ret < 0)
 		goto fail_clk;
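
The spear13xx conversion is the clearest example of the "pass the device-specific struct" theme: internal helpers now take the driver's own struct, the generic struct pcie_port is derived from it, and only at the DesignWare callback boundary is the wrapper recovered with container_of. A compact sketch of that arrangement, with a hypothetical foo_pcie wrapper and assuming the v4.9-era pcie_port/pcie_host_ops API:

	struct foo_pcie {
		struct pcie_port pp;	/* generic DesignWare port, embedded */
		void __iomem *app_base;	/* device-specific registers */
	};

	#define to_foo_pcie(port)	container_of(port, struct foo_pcie, pp)

	static void foo_pcie_enable_interrupts(struct foo_pcie *foo)
	{
		struct pcie_port *pp = &foo->pp;	/* generic view on demand */

		dev_info(pp->dev, "enabling interrupts\n");
	}

	static void foo_pcie_host_init(struct pcie_port *pp)
	{
		/* the DW core hands us the generic port; unwrap it exactly once */
		foo_pcie_enable_interrupts(to_foo_pcie(pp));
	}

Unwrapping once at the boundary keeps the container_of noise out of every helper and makes the helpers' real dependency (the device-specific state) explicit in their signatures.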


@@ -212,6 +212,7 @@ static bool nwl_phy_link_up(struct nwl_pcie *pcie)
 static int nwl_wait_for_link(struct nwl_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	int retries;
 
 	/* check if the link is up or not */
@@ -221,7 +222,7 @@ static int nwl_wait_for_link(struct nwl_pcie *pcie)
 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 	}
 
-	dev_err(pcie->dev, "PHY link never came up\n");
+	dev_err(dev, "PHY link never came up\n");
 	return -ETIMEDOUT;
 }
@@ -277,6 +278,7 @@ static struct pci_ops nwl_pcie_ops = {
 static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
 {
 	struct nwl_pcie *pcie = data;
+	struct device *dev = pcie->dev;
 	u32 misc_stat;
 
 	/* Checking for misc interrupts */
@@ -286,45 +288,43 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
 		return IRQ_NONE;
 
 	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
-		dev_err(pcie->dev, "Received Message FIFO Overflow\n");
+		dev_err(dev, "Received Message FIFO Overflow\n");
 
 	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
-		dev_err(pcie->dev, "Slave error\n");
+		dev_err(dev, "Slave error\n");
 
 	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
-		dev_err(pcie->dev, "Master error\n");
+		dev_err(dev, "Master error\n");
 
 	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
-		dev_err(pcie->dev,
-			"In Misc Ingress address translation error\n");
+		dev_err(dev, "In Misc Ingress address translation error\n");
 
 	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
-		dev_err(pcie->dev,
-			"In Misc Egress address translation error\n");
+		dev_err(dev, "In Misc Egress address translation error\n");
 
 	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
-		dev_err(pcie->dev, "Fatal Error in AER Capability\n");
+		dev_err(dev, "Fatal Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
-		dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
+		dev_err(dev, "Non-Fatal Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_CORR_AER)
-		dev_err(pcie->dev, "Correctable Error in AER Capability\n");
+		dev_err(dev, "Correctable Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
-		dev_err(pcie->dev, "Unsupported request Detected\n");
+		dev_err(dev, "Unsupported request Detected\n");
 
 	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
-		dev_err(pcie->dev, "Non-Fatal Error Detected\n");
+		dev_err(dev, "Non-Fatal Error Detected\n");
 
 	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
-		dev_err(pcie->dev, "Fatal Error Detected\n");
+		dev_err(dev, "Fatal Error Detected\n");
 
 	if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
-		dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
+		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
 
 	if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
-		dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
+		dev_info(dev, "Link Bandwidth Management Status bit set\n");
 
 	/* Clear misc interrupt status */
 	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
@@ -494,20 +494,21 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
 static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
 {
 #ifdef CONFIG_PCI_MSI
-	struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
+	struct device *dev = pcie->dev;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
 	struct nwl_msi *msi = &pcie->msi;
 
 	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
 						&dev_msi_domain_ops, pcie);
 	if (!msi->dev_domain) {
-		dev_err(pcie->dev, "failed to create dev IRQ domain\n");
+		dev_err(dev, "failed to create dev IRQ domain\n");
 		return -ENOMEM;
 	}
 	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
 						    &nwl_msi_domain_info,
 						    msi->dev_domain);
 	if (!msi->msi_domain) {
-		dev_err(pcie->dev, "failed to create msi IRQ domain\n");
+		dev_err(dev, "failed to create msi IRQ domain\n");
 		irq_domain_remove(msi->dev_domain);
 		return -ENOMEM;
 	}
@@ -517,12 +518,13 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
 static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 {
-	struct device_node *node = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
 	struct device_node *legacy_intc_node;
 
 	legacy_intc_node = of_get_next_child(node, NULL);
 	if (!legacy_intc_node) {
-		dev_err(pcie->dev, "No legacy intc node found\n");
+		dev_err(dev, "No legacy intc node found\n");
 		return -EINVAL;
 	}
@@ -532,7 +534,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 							pcie);
 
 	if (!pcie->legacy_irq_domain) {
-		dev_err(pcie->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
@@ -542,7 +544,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct nwl_msi *msi = &pcie->msi;
 	unsigned long base;
 	int ret;
@@ -557,7 +560,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 	/* Get msi_1 IRQ number */
 	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
 	if (msi->irq_msi1 < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
 		ret = -EINVAL;
 		goto err;
 	}
@@ -568,7 +571,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 	/* Get msi_0 IRQ number */
 	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
 	if (msi->irq_msi0 < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
 		ret = -EINVAL;
 		goto err;
 	}
@@ -579,7 +582,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 	/* Check for msii_present bit */
 	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
 	if (!ret) {
-		dev_err(pcie->dev, "MSI not present\n");
+		dev_err(dev, "MSI not present\n");
 		ret = -EIO;
 		goto err;
 	}
@@ -628,13 +631,14 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	u32 breg_val, ecam_val, first_busno = 0;
 	int err;
 
 	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
 	if (!breg_val) {
-		dev_err(pcie->dev, "BREG is not present\n");
+		dev_err(dev, "BREG is not present\n");
 		return breg_val;
 	}
@@ -665,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
 	if (!ecam_val) {
-		dev_err(pcie->dev, "ECAM is not present\n");
+		dev_err(dev, "ECAM is not present\n");
 		return ecam_val;
 	}
@@ -692,23 +696,23 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
 
 	if (nwl_pcie_link_up(pcie))
-		dev_info(pcie->dev, "Link is UP\n");
+		dev_info(dev, "Link is UP\n");
 	else
-		dev_info(pcie->dev, "Link is DOWN\n");
+		dev_info(dev, "Link is DOWN\n");
 
 	/* Get misc IRQ number */
 	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
 	if (pcie->irq_misc < 0) {
-		dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
+		dev_err(dev, "failed to get misc IRQ %d\n",
 			pcie->irq_misc);
 		return -EINVAL;
 	}
 
-	err = devm_request_irq(pcie->dev, pcie->irq_misc,
+	err = devm_request_irq(dev, pcie->irq_misc,
 			       nwl_pcie_misc_handler, IRQF_SHARED,
 			       "nwl_pcie:misc", pcie);
 	if (err) {
-		dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
+		dev_err(dev, "fail to register misc IRQ#%d\n",
 			pcie->irq_misc);
 		return err;
 	}
@@ -744,31 +748,32 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
 			     struct platform_device *pdev)
 {
-	struct device_node *node = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
 	struct resource *res;
 	const char *type;
 
 	/* Check for device type */
 	type = of_get_property(node, "device_type", NULL);
 	if (!type || strcmp(type, "pci")) {
-		dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
+		dev_err(dev, "invalid \"device_type\" %s\n", type);
 		return -EINVAL;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
-	pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->breg_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->breg_base))
 		return PTR_ERR(pcie->breg_base);
 	pcie->phys_breg_base = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
-	pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->pcireg_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->pcireg_base))
 		return PTR_ERR(pcie->pcireg_base);
 	pcie->phys_pcie_reg_base = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
-	pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->ecam_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->ecam_base))
 		return PTR_ERR(pcie->ecam_base);
 	pcie->phys_ecam_base = res->start;
@@ -776,8 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
 	/* Get intx IRQ number */
 	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
 	if (pcie->irq_intx < 0) {
-		dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
-			pcie->irq_intx);
+		dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
 		return -EINVAL;
 	}
@@ -794,7 +798,8 @@ static const struct of_device_id nwl_pcie_of_match[] = {
 static int nwl_pcie_probe(struct platform_device *pdev)
 {
-	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
 	struct nwl_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
@@ -802,42 +807,42 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
+	pcie->dev = dev;
 	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
 
 	err = nwl_pcie_parse_dt(pcie, pdev);
 	if (err) {
-		dev_err(pcie->dev, "Parsing DT failed\n");
+		dev_err(dev, "Parsing DT failed\n");
 		return err;
 	}
 
 	err = nwl_pcie_bridge_init(pcie);
 	if (err) {
-		dev_err(pcie->dev, "HW Initialization failed\n");
+		dev_err(dev, "HW Initialization failed\n");
 		return err;
 	}
 
 	err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
 	if (err) {
-		dev_err(pcie->dev, "Getting bridge resources failed\n");
+		dev_err(dev, "Getting bridge resources failed\n");
 		return err;
 	}
 
-	err = devm_request_pci_bus_resources(pcie->dev, &res);
+	err = devm_request_pci_bus_resources(dev, &res);
 	if (err)
 		goto error;
 
 	err = nwl_pcie_init_irq_domain(pcie);
 	if (err) {
-		dev_err(pcie->dev, "Failed creating IRQ Domain\n");
+		dev_err(dev, "Failed creating IRQ Domain\n");
 		goto error;
 	}
 
-	bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
+	bus = pci_create_root_bus(dev, pcie->root_busno,
 				  &nwl_pcie_ops, pcie, &res);
 	if (!bus) {
 		err = -ENOMEM;
@@ -847,8 +852,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = nwl_pcie_enable_msi(pcie, bus);
 		if (err < 0) {
-			dev_err(&pdev->dev,
-				"failed to enable MSI support: %d\n", err);
+			dev_err(dev, "failed to enable MSI support: %d\n", err);
 			goto error;
 		}
 	}
@@ -857,7 +861,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
 	pci_bus_add_devices(bus);
-	platform_set_drvdata(pdev, pcie);
 	return 0;
 
 error:
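
The platform_set_drvdata() deletions here and in the other probe functions follow a simple rule: drvdata only earns its keep if some other hook reads it back with platform_get_drvdata(); with no .remove, suspend, or resume callback doing so, the call is dead code. A hypothetical pairing that would justify keeping it (foo_pcie and foo_pcie_teardown are illustrative, not from these drivers):

	static int foo_pcie_remove(struct platform_device *pdev)
	{
		struct foo_pcie *pcie = platform_get_drvdata(pdev);

		/* the reader that justifies the setter in probe */
		foo_pcie_teardown(pcie);
		return 0;
	}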


@@ -140,10 +140,11 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
  */
 static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
 	unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
 
 	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
-		dev_dbg(port->dev, "Requester ID %lu\n",
+		dev_dbg(dev, "Requester ID %lu\n",
 			val & XILINX_PCIE_RPEFR_REQ_ID);
 		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
 			   XILINX_PCIE_REG_RPEFR);
@@ -228,11 +229,10 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
 /**
  * xilinx_pcie_assign_msi - Allocate MSI number
- * @port: PCIe port structure
  *
  * Return: A valid IRQ on success and error value on failure.
  */
-static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_assign_msi(void)
 {
 	int pos;
@@ -275,7 +275,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
 	struct msi_msg msg;
 	phys_addr_t msg_addr;
 
-	hwirq = xilinx_pcie_assign_msi(port);
+	hwirq = xilinx_pcie_assign_msi();
 	if (hwirq < 0)
 		return hwirq;
@@ -383,6 +383,7 @@ static const struct irq_domain_ops intx_domain_ops = {
 static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 {
 	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	struct device *dev = port->dev;
 	u32 val, mask, status, msi_data;
 
 	/* Read interrupt decode and mask registers */
@@ -394,32 +395,32 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 		return IRQ_NONE;
 
 	if (status & XILINX_PCIE_INTR_LINK_DOWN)
-		dev_warn(port->dev, "Link Down\n");
+		dev_warn(dev, "Link Down\n");
 
 	if (status & XILINX_PCIE_INTR_ECRC_ERR)
-		dev_warn(port->dev, "ECRC failed\n");
+		dev_warn(dev, "ECRC failed\n");
 
 	if (status & XILINX_PCIE_INTR_STR_ERR)
-		dev_warn(port->dev, "Streaming error\n");
+		dev_warn(dev, "Streaming error\n");
 
 	if (status & XILINX_PCIE_INTR_HOT_RESET)
-		dev_info(port->dev, "Hot reset\n");
+		dev_info(dev, "Hot reset\n");
 
 	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
-		dev_warn(port->dev, "ECAM access timeout\n");
+		dev_warn(dev, "ECAM access timeout\n");
 
 	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
-		dev_warn(port->dev, "Correctable error message\n");
+		dev_warn(dev, "Correctable error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_NONFATAL) {
-		dev_warn(port->dev, "Non fatal error message\n");
+		dev_warn(dev, "Non fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_FATAL) {
-		dev_warn(port->dev, "Fatal error message\n");
+		dev_warn(dev, "Fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
@@ -429,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 		/* Check whether interrupt valid */
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			dev_warn(dev, "RP Intr FIFO1 read error\n");
 			goto error;
 		}
@@ -451,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 			val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
 
 			if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-				dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+				dev_warn(dev, "RP Intr FIFO1 read error\n");
 				goto error;
 			}
@@ -471,31 +472,31 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
-		dev_warn(port->dev, "Slave unsupported request\n");
+		dev_warn(dev, "Slave unsupported request\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
-		dev_warn(port->dev, "Slave unexpected completion\n");
+		dev_warn(dev, "Slave unexpected completion\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_COMPL)
-		dev_warn(port->dev, "Slave completion timeout\n");
+		dev_warn(dev, "Slave completion timeout\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ERRP)
-		dev_warn(port->dev, "Slave Error Poison\n");
+		dev_warn(dev, "Slave Error Poison\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
-		dev_warn(port->dev, "Slave Completer Abort\n");
+		dev_warn(dev, "Slave Completer Abort\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
-		dev_warn(port->dev, "Slave Illegal Burst\n");
+		dev_warn(dev, "Slave Illegal Burst\n");
 
 	if (status & XILINX_PCIE_INTR_MST_DECERR)
-		dev_warn(port->dev, "Master decode error\n");
+		dev_warn(dev, "Master decode error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_SLVERR)
-		dev_warn(port->dev, "Master slave error\n");
+		dev_warn(dev, "Master slave error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_ERRP)
-		dev_warn(port->dev, "Master error poison\n");
+		dev_warn(dev, "Master error poison\n");
 
 error:
 	/* Clear the Interrupt Decode register */
@@ -554,10 +555,12 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
  */
 static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
+
 	if (xilinx_pcie_link_is_up(port))
-		dev_info(port->dev, "PCIe Link is UP\n");
+		dev_info(dev, "PCIe Link is UP\n");
 	else
-		dev_info(port->dev, "PCIe Link is DOWN\n");
+		dev_info(dev, "PCIe Link is DOWN\n");
 
 	/* Disable all interrupts */
 	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
@@ -627,8 +630,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
  */
 static int xilinx_pcie_probe(struct platform_device *pdev)
 {
-	struct xilinx_pcie_port *port;
 	struct device *dev = &pdev->dev;
+	struct xilinx_pcie_port *port;
 	struct pci_bus *bus;
 	int err;
 	resource_size_t iobase = 0;
@@ -668,15 +671,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto error;
 
-	bus = pci_create_root_bus(&pdev->dev, 0,
-				  &xilinx_pcie_ops, port, &res);
+	bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
 	if (!bus) {
 		err = -ENOMEM;
 		goto error;
 	}
 
 #ifdef CONFIG_PCI_MSI
-	xilinx_pcie_msi_chip.dev = port->dev;
+	xilinx_pcie_msi_chip.dev = dev;
 	bus->msi = &xilinx_pcie_msi_chip;
 #endif
 	pci_scan_child_bus(bus);
@@ -685,8 +687,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 #endif
 	pci_bus_add_devices(bus);
-	platform_set_drvdata(pdev, port);
-
 	return 0;
 
 error:
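
Finally, xilinx_pcie_assign_msi() above drops a parameter that nothing reads: the allocator touches only driver-global MSI state, so it now takes no arguments. A sketch of that kind of allocator, assuming a global bitmap like the one the driver keeps (names are illustrative, and locking is omitted for brevity):

	#define FOO_MSI_COUNT	32

	static DECLARE_BITMAP(foo_msi_used, FOO_MSI_COUNT);

	static int foo_assign_msi(void)
	{
		int pos;

		pos = find_first_zero_bit(foo_msi_used, FOO_MSI_COUNT);
		if (pos >= FOO_MSI_COUNT)
			return -ENOSPC;		/* all vectors in use */

		set_bit(pos, foo_msi_used);
		return pos;
	}

Removing unused parameters keeps function signatures honest about their real dependencies, which is the common thread running through this whole series.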