[PATCH v9 07/10] iommu/arm-smmu-v3: Maintain a SID->device structure
Jean-Philippe Brucker
jean-philippe at linaro.org
Fri Jan 8 09:52:15 EST 2021
When handling faults from the event or PRI queue, we need to find the
struct device associated with a SID. Add an rb_tree to keep track of
SIDs.
Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
---
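Note on arm_smmu_find_master(): it is introduced __maybe_unused because its
first caller (the event/PRI queue fault handling) only arrives later in this
series. As a rough, hypothetical sketch of the intended use, not part of
this patch, an event-queue handler could resolve the SID field of an event
record to the owning master along these lines (arm_smmu_decode_evt_sid is a
made-up name for illustration; EVTQ_0_SID already exists in arm-smmu-v3.h):

    /* Illustrative sketch only; the real caller is added by a later patch */
    static void arm_smmu_decode_evt_sid(struct arm_smmu_device *smmu, u64 *evt)
    {
            struct arm_smmu_master *master;
            u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);

            master = arm_smmu_find_master(smmu, sid);
            if (!master) {
                    /* SID not owned by any probed device */
                    dev_warn(smmu->dev, "event for unknown SID 0x%x\n", sid);
                    return;
            }

            /* master->dev is the struct device behind this SID */
            dev_dbg(master->dev, "received event for SID 0x%x\n", sid);
    }

The lookup takes streams_mutex and so may sleep, which is fine for the
threaded event/PRI queue handlers but rules out hard-IRQ context.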
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 13 +-
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 161 ++++++++++++++++----
2 files changed, 144 insertions(+), 30 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 96c2e9565e00..8ef6a1c48635 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -636,6 +636,15 @@ struct arm_smmu_device {
/* IOMMU core code handle */
struct iommu_device iommu;
+
+ struct rb_root streams;
+ struct mutex streams_mutex;
+};
+
+struct arm_smmu_stream {
+ u32 id;
+ struct arm_smmu_master *master;
+ struct rb_node node;
};
/* SMMU private data for each master */
@@ -644,8 +653,8 @@ struct arm_smmu_master {
struct device *dev;
struct arm_smmu_domain *domain;
struct list_head domain_head;
- u32 *sids;
- unsigned int num_sids;
+ struct arm_smmu_stream *streams;
+ unsigned int num_streams;
bool ats_enabled;
bool sva_enabled;
struct list_head bonds;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6a53b4edf054..2dbae2e6965d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -912,8 +912,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- for (i = 0; i < master->num_sids; i++) {
- cmd.cfgi.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
}
@@ -1355,6 +1355,32 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
}
+__maybe_unused
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+ struct rb_node *node;
+ struct arm_smmu_stream *stream;
+ struct arm_smmu_master *master = NULL;
+
+ mutex_lock(&smmu->streams_mutex);
+ node = smmu->streams.rb_node;
+ while (node) {
+ stream = rb_entry(node, struct arm_smmu_stream, node);
+ if (stream->id < sid) {
+ node = node->rb_right;
+ } else if (stream->id > sid) {
+ node = node->rb_left;
+ } else {
+ master = stream->master;
+ break;
+ }
+ }
+ mutex_unlock(&smmu->streams_mutex);
+
+ return master;
+}
+
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
@@ -1588,8 +1614,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
}
@@ -1632,8 +1658,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
if (!master->ats_enabled)
continue;
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
}
}
@@ -2040,13 +2066,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
int i, j;
struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < master->num_sids; ++i) {
- u32 sid = master->sids[i];
+ for (i = 0; i < master->num_streams; ++i) {
+ u32 sid = master->streams[i].id;
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
- if (master->sids[j] == sid)
+ if (master->streams[j].id == sid)
break;
if (j < i)
continue;
@@ -2319,11 +2345,101 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_master *master)
+{
+ int i;
+ int ret = 0;
+ struct arm_smmu_stream *new_stream, *cur_stream;
+ struct rb_node **new_node, *parent_node = NULL;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ master->streams = kcalloc(fwspec->num_ids,
+ sizeof(struct arm_smmu_stream), GFP_KERNEL);
+ if (!master->streams)
+ return -ENOMEM;
+ master->num_streams = fwspec->num_ids;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids && !ret; i++) {
+ u32 sid = fwspec->ids[i];
+
+ new_stream = &master->streams[i];
+ new_stream->id = sid;
+ new_stream->master = master;
+
+ /*
+ * Check the SIDs are in range of the SMMU and our stream table
+ */
+ if (!arm_smmu_sid_in_range(smmu, sid)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ ret = arm_smmu_init_l2_strtab(smmu, sid);
+ if (ret)
+ break;
+ }
+
+ /* Insert into SID tree */
+ new_node = &(smmu->streams.rb_node);
+ while (*new_node) {
+ cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+ node);
+ parent_node = *new_node;
+ if (cur_stream->id > new_stream->id) {
+ new_node = &((*new_node)->rb_left);
+ } else if (cur_stream->id < new_stream->id) {
+ new_node = &((*new_node)->rb_right);
+ } else {
+ dev_warn(master->dev,
+ "stream %u already in tree\n",
+ cur_stream->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret)
+ break;
+ rb_link_node(&new_stream->node, parent_node, new_node);
+ rb_insert_color(&new_stream->node, &smmu->streams);
+ }
+
+ if (ret) {
+ for (i--; i >= 0; i--)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ kfree(master->streams);
+ }
+ mutex_unlock(&smmu->streams_mutex);
+
+ return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+ int i;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ if (!smmu || !master->streams)
+ return;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ mutex_unlock(&smmu->streams_mutex);
+
+ kfree(master->streams);
+}
+
static struct iommu_ops arm_smmu_ops;
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
- int i, ret;
+ int ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -2344,27 +2460,12 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
- master->sids = fwspec->ids;
- master->num_sids = fwspec->num_ids;
INIT_LIST_HEAD(&master->bonds);
dev_iommu_priv_set(dev, master);
- /* Check the SIDs are in range of the SMMU and our stream table */
- for (i = 0; i < master->num_sids; i++) {
- u32 sid = master->sids[i];
-
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
- goto err_free_master;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- goto err_free_master;
- }
- }
+ ret = arm_smmu_insert_master(smmu, master);
+ if (ret)
+ goto err_free_master;
device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
@@ -2403,6 +2504,7 @@ static void arm_smmu_release_device(struct device *dev)
WARN_ON(arm_smmu_master_sva_enabled(master));
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
+ arm_smmu_remove_master(master);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -2825,6 +2927,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
+ mutex_init(&smmu->streams_mutex);
+ smmu->streams = RB_ROOT;
+
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
--
2.29.2