@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -26,6 +27,7 @@
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
+#include <linux/ti-pvu.h>
#include "../../pci.h"
#include "pcie-designware.h"
@@ -111,6 +113,16 @@
#define PCI_DEVICE_ID_TI_AM654X 0xb00c
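+/* VirtID used for all inbound requests and for the PVU protection region */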
+#define KS_PCI_VIRTID 0
+
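+/* VMAP register offsets, shared by the low and high priority ports */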
+#define PCIE_VMAP_xP_CTRL 0x0
+#define PCIE_VMAP_xP_REQID 0x4
+#define PCIE_VMAP_xP_VIRTID 0x8
+
+#define PCIE_VMAP_xP_CTRL_EN BIT(0)
+
+#define PCIE_VMAP_xP_VIRTID_VID_MASK 0xfff
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -1125,6 +1137,96 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};
+#ifdef CONFIG_TI_PVU
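+/*
+ * Configure one VMAP instance so that requests arriving on the corresponding
+ * port are assigned KS_PCI_VIRTID.
+ */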
+static int ks_init_vmap(struct platform_device *pdev, const char *vmap_name)
+{
+ struct resource *res;
+ void __iomem *base;
+ u32 val;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, vmap_name);
+ base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ writel(0, base + PCIE_VMAP_xP_REQID);
+
+ val = readl(base + PCIE_VMAP_xP_VIRTID);
+ val &= ~PCIE_VMAP_xP_VIRTID_VID_MASK;
+ val |= KS_PCI_VIRTID;
+ writel(val, base + PCIE_VMAP_xP_VIRTID);
+
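+ /* Enable the mapping */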
+ val = readl(base + PCIE_VMAP_xP_CTRL);
+ val |= PCIE_VMAP_xP_CTRL_EN;
+ writel(val, base + PCIE_VMAP_xP_CTRL);
+
+ return 0;
+}
+
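+/*
+ * Restrict inbound DMA to the first "restricted-dma-pool" region referenced
+ * by the controller node: the VMAP modules assign KS_PCI_VIRTID to all
+ * incoming requests and a PVU region limits that VirtID to the pool.
+ */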
+static int ks_init_restricted_dma(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct of_phandle_iterator it;
+ struct resource phys;
+ int err;
+
+ /* Only process the first restricted DMA pool; more than one is not allowed */
+ of_for_each_phandle(&it, err, dev->of_node, "memory-region",
+ NULL, 0) {
+ if (of_device_is_compatible(it.node, "restricted-dma-pool"))
+ break;
+ }
+ if (err)
+ return err == -ENOENT ? 0 : err;
+
+ err = of_address_to_resource(it.node, 0, &phys);
+ if (err < 0) {
+ dev_err(dev, "failed to parse memory region %pOF: %d\n",
+ it.node, err);
+ return 0;
+ }
+
+ /* Map all incoming requests on the low and high priority ports to virtID 0 */
+ err = ks_init_vmap(pdev, "vmap_lp");
+ if (err)
+ return err;
+ err = ks_init_vmap(pdev, "vmap_hp");
+ if (err)
+ return err;
+
+ /*
+ * Enforce DMA pool usage with the help of the PVU.
+ * Any request outside of the pool is dropped and raises an error at the PVU.
+ */
+ return ti_pvu_create_region(KS_PCI_VIRTID, &phys);
+}
+
+static void ks_release_restricted_dma(struct platform_device *pdev)
+{
+ struct of_phandle_iterator it;
+ struct resource phys;
+ int err;
+
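+ /* Remove the PVU region created by ks_init_restricted_dma() */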
+ of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
+ NULL, 0) {
+ if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
+ of_address_to_resource(it.node, 0, &phys) == 0) {
+ ti_pvu_remove_region(KS_PCI_VIRTID, &phys);
+ break;
+ }
+ }
+}
+#else
+static inline int ks_init_restricted_dma(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void ks_release_restricted_dma(struct platform_device *pdev)
+{
+}
+#endif
+
static int ks_pcie_probe(struct platform_device *pdev)
{
const struct dw_pcie_host_ops *host_ops;
@@ -1273,6 +1375,10 @@ static int ks_pcie_probe(struct platform_device *pdev)
if (ret < 0)
goto err_get_sync;
+ ret = ks_init_restricted_dma(pdev);
+ if (ret < 0)
+ goto err_get_sync;
+
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
@@ -1354,6 +1460,8 @@ static void ks_pcie_remove(struct platform_device *pdev)
int num_lanes = ks_pcie->num_lanes;
struct device *dev = &pdev->dev;
+ ks_release_restricted_dma(pdev);
+
pm_runtime_put(dev);
pm_runtime_disable(dev);
ks_pcie_disable_phy(ks_pcie);