path: root/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
author:    Yangbo Lu <yangbo.lu@nxp.com>  2017-10-16 18:48:11 +0800
committer: John Crispin <john@phrozen.org>  2017-11-10 23:00:49 +0100
commit:    ce6311d2836408f8e79596d1c8d4daf85e94a672 (patch)
tree:      d566f0a5ec4f332ae0b1ded9e015740c2a5bc6da /target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
parent:    ede04541ba751956c59235ed736c679e8a494c35 (diff)
download:  upstream-ce6311d2836408f8e79596d1c8d4daf85e94a672.tar.gz
           upstream-ce6311d2836408f8e79596d1c8d4daf85e94a672.tar.bz2
           upstream-ce6311d2836408f8e79596d1c8d4daf85e94a672.zip
layerscape: update kernel patches
Updated kernel patches to align the layerscape kernel with the latest LSDK Linux (LSDK-17.09-update-103017-V4.9 tag).

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch')
-rw-r--r--   target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch   461
1 file changed, 408 insertions(+), 53 deletions(-)
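
One change worth noting before the diff: the updated patch backports a configurable default IOMMU domain type, selected by a new iommu.passthrough boot parameter (see the drivers/iommu/iommu.c hunks below, where iommu_set_def_domain_type() parses the value with strtobool()). A hedged illustration of the expected behaviour; the command-line values are examples and are not part of the commit:

    iommu.passthrough=1    default domains are IOMMU_DOMAIN_IDENTITY (DMA bypasses the IOMMU)
    iommu.passthrough=0    default domains remain IOMMU_DOMAIN_DMA (DMA is translated)

If the requested default domain type cannot be allocated, iommu_group_get_for_dev() warns and falls back to IOMMU_DOMAIN_DMA, per the corresponding hunk below.
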
diff --git a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
index ec89173619..fad24106a0 100644
--- a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
@@ -1,4 +1,4 @@
-From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001
+From 152f316e7829f6aeb3a36009e7e5ec0f1d97d770 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 27 Sep 2017 10:33:26 +0800
Subject: [PATCH] iommu: support layerscape
@@ -12,20 +12,22 @@ Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
drivers/iommu/amd_iommu.c | 56 ++++++----
- drivers/iommu/arm-smmu-v3.c | 35 ++++++-
- drivers/iommu/arm-smmu.c | 74 ++++++++++---
+ drivers/iommu/arm-smmu-v3.c | 117 ++++++++++++++-------
+ drivers/iommu/arm-smmu.c | 100 +++++++++++++++---
drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
drivers/iommu/intel-iommu.c | 92 ++++++++++++----
- drivers/iommu/iommu.c | 191 ++++++++++++++++++++++++++++++++--
+ drivers/iommu/iommu.c | 219 ++++++++++++++++++++++++++++++++++++---
drivers/iommu/mtk_iommu.c | 2 +
drivers/iommu/mtk_iommu_v1.c | 2 +
include/linux/dma-iommu.h | 11 ++
include/linux/iommu.h | 55 +++++++---
- 10 files changed, 645 insertions(+), 115 deletions(-)
+ 10 files changed, 739 insertions(+), 157 deletions(-)
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index c380b7e8..93199931 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic
+@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
if (!entry->group)
entry->group = generic_device_group(dev);
@@ -34,7 +36,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
return entry->group;
}
-@@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu
+@@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
@@ -47,7 +49,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
struct unity_map_entry *entry;
int devid;
-@@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str
+@@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
@@ -118,7 +120,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
-@@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_
+@@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
@@ -131,6 +133,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index e6f9b2d7..48e2a7c4 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -410,6 +410,9 @@
@@ -143,7 +147,92 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
-@@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_
+@@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg {
+ };
+
+ struct arm_smmu_strtab_ent {
+- bool valid;
+-
+- bool bypass; /* Overrides s1/s2 config */
++ /*
++ * An STE is "assigned" if the master emitting the corresponding SID
++ * is attached to a domain. The behaviour of an unassigned STE is
++ * determined by the disable_bypass parameter, whereas an assigned
++ * STE behaves according to s1_cfg/s2_cfg, which themselves are
++ * configured according to the domain type.
++ */
++ bool assigned;
+ struct arm_smmu_s1_cfg *s1_cfg;
+ struct arm_smmu_s2_cfg *s2_cfg;
+ };
+@@ -627,6 +635,7 @@ enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
++ ARM_SMMU_DOMAIN_BYPASS,
+ };
+
+ struct arm_smmu_domain {
+@@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ * This is hideously complicated, but we only really care about
+ * three cases at the moment:
+ *
+- * 1. Invalid (all zero) -> bypass (init)
+- * 2. Bypass -> translation (attach)
+- * 3. Translation -> bypass (detach)
++ * 1. Invalid (all zero) -> bypass/fault (init)
++ * 2. Bypass/fault -> translation/bypass (attach)
++ * 3. Translation/bypass -> bypass/fault (detach)
+ *
+ * Given that we can't update the STE atomically and the SMMU
+ * doesn't read the thing in a defined order, that leaves us
+@@ -1040,17 +1049,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ }
+ }
+
+- /* Nuke the existing Config, as we're going to rewrite it */
+- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
++ /* Nuke the existing STE_0 value, as we're going to rewrite it */
++ val = STRTAB_STE_0_V;
+
+- if (ste->valid)
+- val |= STRTAB_STE_0_V;
+- else
+- val &= ~STRTAB_STE_0_V;
++ /* Bypass/fault */
++ if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
++ if (!ste->assigned && disable_bypass)
++ val |= STRTAB_STE_0_CFG_ABORT;
++ else
++ val |= STRTAB_STE_0_CFG_BYPASS;
+
+- if (ste->bypass) {
+- val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
+- : STRTAB_STE_0_CFG_BYPASS;
+ dst[0] = cpu_to_le64(val);
+ dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
+ << STRTAB_STE_1_SHCFG_SHIFT);
+@@ -1081,7 +1089,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
+ << STRTAB_STE_0_S1CTXPTR_SHIFT) |
+ STRTAB_STE_0_CFG_S1_TRANS;
+-
+ }
+
+ if (ste->s2_cfg) {
+@@ -1114,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
+ {
+ unsigned int i;
+- struct arm_smmu_strtab_ent ste = {
+- .valid = true,
+- .bypass = true,
+- };
++ struct arm_smmu_strtab_ent ste = { .assigned = false };
+
+ for (i = 0; i < nent; ++i) {
+ arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
+@@ -1370,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
@@ -152,7 +241,96 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
case IOMMU_CAP_NOEXEC:
return true;
default:
-@@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domai
+@@ -1383,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ {
+ struct arm_smmu_domain *smmu_domain;
+
+- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
++ if (type != IOMMU_DOMAIN_UNMANAGED &&
++ type != IOMMU_DOMAIN_DMA &&
++ type != IOMMU_DOMAIN_IDENTITY)
+ return NULL;
+
+ /*
+@@ -1514,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
++ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
++ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
++ return 0;
++ }
++
+ /* Restrict the stage to what we can actually support */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+@@ -1584,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+ return step;
+ }
+
+-static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
++static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ {
+ int i;
+ struct arm_smmu_master_data *master = fwspec->iommu_priv;
+@@ -1596,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+
+ arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+ }
+-
+- return 0;
+ }
+
+ static void arm_smmu_detach_dev(struct device *dev)
+ {
+ struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
+
+- master->ste.bypass = true;
+- if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
+- dev_warn(dev, "failed to install bypass STE\n");
++ master->ste.assigned = false;
++ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+ }
+
+ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+@@ -1625,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ ste = &master->ste;
+
+ /* Already attached to a different domain? */
+- if (!ste->bypass)
++ if (ste->assigned)
+ arm_smmu_detach_dev(dev);
+
+ mutex_lock(&smmu_domain->init_mutex);
+@@ -1646,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ goto out_unlock;
+ }
+
+- ste->bypass = false;
+- ste->valid = true;
++ ste->assigned = true;
+
+- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
++ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
++ ste->s1_cfg = NULL;
++ ste->s2_cfg = NULL;
++ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ ste->s1_cfg = &smmu_domain->s1_cfg;
+ ste->s2_cfg = NULL;
+ arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
+@@ -1658,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ ste->s2_cfg = &smmu_domain->s2_cfg;
+ }
+
+- ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+- if (ret < 0)
+- ste->valid = false;
+-
++ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+ out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+@@ -1709,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -162,7 +340,36 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
if (!ops)
return 0;
-@@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct devi
+@@ -1807,7 +1815,7 @@ static void arm_smmu_remove_device(struct device *dev)
+ return;
+
+ master = fwspec->iommu_priv;
+- if (master && master->ste.valid)
++ if (master && master->ste.assigned)
+ arm_smmu_detach_dev(dev);
+ iommu_group_remove_device(dev);
+ kfree(master);
+@@ -1836,6 +1844,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ {
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
++ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++ return -EINVAL;
++
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+@@ -1851,6 +1862,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ int ret = 0;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
++ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++ return -EINVAL;
++
+ mutex_lock(&smmu_domain->init_mutex);
+
+ switch (attr) {
+@@ -1880,6 +1894,31 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
@@ -194,7 +401,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
-@@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = {
+@@ -1895,6 +1934,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
@@ -203,6 +410,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 8f728144..3243a96d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -49,6 +49,7 @@
@@ -231,7 +440,65 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
-@@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys
+@@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
++ ARM_SMMU_DOMAIN_BYPASS,
+ };
+
+ struct arm_smmu_domain {
+@@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ if (smmu_domain->smmu)
+ goto out_unlock;
+
++ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
++ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
++ smmu_domain->smmu = smmu;
++ goto out_unlock;
++ }
++
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+@@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ void __iomem *cb_base;
+ int irq;
+
+- if (!smmu)
++ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ return;
+
+ /*
+@@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ {
+ struct arm_smmu_domain *smmu_domain;
+
+- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
++ if (type != IOMMU_DOMAIN_UNMANAGED &&
++ type != IOMMU_DOMAIN_DMA &&
++ type != IOMMU_DOMAIN_IDENTITY)
+ return NULL;
+ /*
+ * Allocate the domain and initialise some of its data structures.
+@@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ {
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+- enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
+ u8 cbndx = smmu_domain->cfg.cbndx;
++ enum arm_smmu_s2cr_type type;
+ int i, idx;
+
++ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
++ type = S2CR_TYPE_BYPASS;
++ else
++ type = S2CR_TYPE_TRANS;
++
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+ continue;
+@@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -241,7 +508,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
if (!ops)
return 0;
-@@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_
+@@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
@@ -250,7 +517,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
case IOMMU_CAP_NOEXEC:
return true;
default:
-@@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_devi
+@@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}
if (group)
@@ -264,7 +531,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
else
group = generic_device_group(dev);
-@@ -1534,17 +1542,44 @@ out_unlock:
+@@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ {
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
++ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++ return -EINVAL;
++
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+@@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ int ret = 0;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
++ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++ return -EINVAL;
++
+ mutex_lock(&smmu_domain->init_mutex);
+
+ switch (attr) {
+@@ -1534,17 +1562,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
@@ -310,7 +597,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
-@@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = {
+@@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
@@ -319,7 +606,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
-@@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct
+@@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
@@ -351,7 +638,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
-@@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(stru
+@@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
@@ -363,6 +650,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
return 0;
}
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 1520e7f0..3ade4153 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
@@ -420,10 +709,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
int iommu_dma_init(void)
-@@ -62,25 +97,53 @@ int iommu_dma_init(void)
+@@ -61,26 +96,54 @@ int iommu_dma_init(void)
+ * callback when domain->type == IOMMU_DOMAIN_DMA.
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
- {
++{
+ if (domain->iova_cookie)
+ return -EEXIST;
+
@@ -448,7 +738,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-+{
+ {
struct iommu_dma_cookie *cookie;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
@@ -479,7 +769,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
*
* IOMMU drivers should normally call this from their domain_free callback.
*/
-@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d
+@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
@@ -488,7 +778,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
put_iova_domain(&cookie->iovad);
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
-@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d
+@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
@@ -594,7 +884,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
/**
-@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str
+@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
@@ -610,7 +900,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
-@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d
+@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
@@ -657,7 +947,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
EXPORT_SYMBOL(iommu_dma_init_domain);
-@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_
+@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
@@ -672,7 +962,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
-@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_
+@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
@@ -697,7 +987,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list);
-@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_
+@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
@@ -709,7 +999,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
out_free_page:
kfree(msi_page);
return NULL;
-@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru
+@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msg->data = ~0U;
} else {
msg->address_hi = upper_32_bits(msi_page->iova);
@@ -718,6 +1008,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
msg->address_lo += lower_32_bits(msi_page->iova);
}
}
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 002f8a42..befbfd30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
@@ -728,7 +1020,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
};
struct dmar_atsr_unit {
-@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(voi
+@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
@@ -782,7 +1074,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
kfree(rmrru);
}
-@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(st
+@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_device_unlink(iommu->iommu_dev, dev);
}
@@ -828,7 +1120,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
-@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_
+@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
#endif /* CONFIG_INTEL_IOMMU_SVM */
static const struct iommu_ops intel_iommu_ops = {
@@ -863,9 +1155,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 87d3060f..e6a8c225 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
-@@ -68,6 +68,13 @@ struct iommu_group_attribute {
+@@ -36,6 +36,7 @@
+
+ static struct kset *iommu_group_kset;
+ static DEFINE_IDA(iommu_group_ida);
++static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+
+ struct iommu_callback_data {
+ const struct iommu_ops *ops;
+@@ -68,6 +69,13 @@ struct iommu_group_attribute {
const char *buf, size_t count);
};
@@ -879,7 +1181,26 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
-@@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(str
+@@ -86,6 +94,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
+ static void __iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+
++static int __init iommu_set_def_domain_type(char *str)
++{
++ bool pt;
++
++ if (!str || strtobool(str, &pt))
++ return -EINVAL;
++
++ iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
++ return 0;
++}
++early_param("iommu.passthrough", iommu_set_def_domain_type);
++
+ static ssize_t iommu_group_attr_show(struct kobject *kobj,
+ struct attribute *__attr, char *buf)
+ {
+@@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
return sprintf(buf, "%s\n", group->name);
}
@@ -1011,7 +1332,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
-@@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(vo
+@@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(void)
*/
kobject_put(&group->kobj);
@@ -1023,7 +1344,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
pr_debug("Allocated group %d\n", group->id);
return group;
-@@ -318,7 +453,7 @@ static int iommu_group_create_direct_map
+@@ -318,7 +466,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
@@ -1032,7 +1353,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
struct list_head mappings;
unsigned long pg_size;
int ret = 0;
-@@ -331,18 +466,21 @@ static int iommu_group_create_direct_map
+@@ -331,18 +479,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
@@ -1057,7 +1378,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;
-@@ -358,7 +496,7 @@ static int iommu_group_create_direct_map
+@@ -358,7 +509,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
}
out:
@@ -1066,10 +1387,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
return ret;
}
-@@ -563,6 +701,19 @@ struct iommu_group *iommu_group_get(stru
+@@ -562,6 +713,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
+ }
EXPORT_SYMBOL_GPL(iommu_group_get);
- /**
++/**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
@@ -1082,11 +1404,33 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return group;
+}
+
-+/**
+ /**
* iommu_group_put - Decrement group reference
* @group: the group to use
- *
-@@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_d
+@@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+ * IOMMU driver.
+ */
+ if (!group->default_domain) {
+- group->default_domain = __iommu_domain_alloc(dev->bus,
+- IOMMU_DOMAIN_DMA);
++ struct iommu_domain *dom;
++
++ dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
++ if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
++ dev_warn(dev,
++ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
++ iommu_def_domain_type);
++ dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
++ }
++
++ group->default_domain = dom;
+ if (!group->domain)
+- group->domain = group->default_domain;
++ group->domain = dom;
+ }
+
+ ret = iommu_group_add_device(group, dev);
+@@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
@@ -1131,9 +1475,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
/* Request that a device is direct mapped by the IOMMU */
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index b12c12d7..9799daea 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
-@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev
+@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
@@ -1142,9 +1488,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
return data->m4u_group;
}
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b8aeb076..c7063e9d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
-@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev
+@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
@@ -1153,6 +1501,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
}
return data->m4u_group;
}
+diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
+index 32c58906..36d3206d 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
@@ -1163,7 +1513,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
-@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct devic
+@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
@@ -1171,7 +1521,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
#else
-@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(s
+@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
return -ENODEV;
}
@@ -1183,7 +1533,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
-@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg
+@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}
@@ -1194,6 +1544,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 436dc213..188599f5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,32 @@ enum iommu_attr {
@@ -1262,7 +1614,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
-@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
+@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -1279,7 +1631,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
-@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
+@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
@@ -1287,7 +1639,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
-@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
+@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
@@ -1312,3 +1664,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;
+--
+2.14.1
+
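
Appendix (not part of the commit): a minimal caller sketch for the iommu_get_msi_cookie() interface added above in include/linux/dma-iommu.h and drivers/iommu/dma-iommu.c. It assumes an IOMMU_DOMAIN_UNMANAGED domain; example_reserve_msi_window() and MSI_DOORBELL_BASE are hypothetical names used only for this sketch.

    #include <linux/iommu.h>
    #include <linux/dma-iommu.h>

    /* Illustrative IOVA base for the MSI window; a real caller picks an
     * address that does not collide with its own IOVA allocations. */
    #define MSI_DOORBELL_BASE 0x08000000UL

    static int example_reserve_msi_window(struct iommu_domain *domain)
    {
            int ret;

            /* Only IOMMU_DOMAIN_UNMANAGED domains are accepted, per the
             * check added in iommu_get_msi_cookie(); other domain types
             * get -EINVAL. */
            ret = iommu_get_msi_cookie(domain, MSI_DOORBELL_BASE);
            if (ret)
                    return ret;

            /* From here on, iommu_dma_map_msi_msg() can place MSI doorbell
             * pages at IOVAs handed out from this window instead of the
             * caller managing those mappings itself. */
            return 0;
    }
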