Commit fac95671 authored by John Garry, committed by Will Deacon

iommu/arm-smmu-v3: Stop pre-zeroing batch commands

Pre-zeroing the batched commands structure is inefficient, as individual
commands are zeroed later in arm_smmu_cmdq_build_cmd(). The structure is
quite large, and in the common case most of the command slots go unused:

	struct arm_smmu_cmdq_batch cmds = {};
345c:	52800001 	mov	w1, #0x0                   	// #0
3460:	d2808102 	mov	x2, #0x408                 	// #1032
3464:	910143a0 	add	x0, x29, #0x50
3468:	94000000 	bl	0 <memset>

Stop pre-zeroing the complete structure and zero only the num member.
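
For illustration, here is a minimal user-space sketch of the pattern
(the struct layout, sizes, and helper names are assumptions made for
the example, not the kernel's actual definitions):

	#include <stdio.h>

	/* Illustrative stand-in for struct arm_smmu_cmdq_batch: a fixed
	 * array of command slots plus a fill counter. The real structure
	 * is about 1032 bytes, hence the memset() shown above. */
	#define BATCH_MAX 64

	struct cmdq_batch {
		unsigned long long cmds[BATCH_MAX * 2]; /* two 64-bit words per command */
		int num;                                /* slots in use */
	};

	/* Each slot is fully written when its command is built (as
	 * arm_smmu_cmdq_build_cmd() does in the driver), so nothing but
	 * the counter needs a defined value up front. */
	static void batch_add(struct cmdq_batch *b, unsigned long long opcode)
	{
		b->cmds[b->num * 2]     = opcode;
		b->cmds[b->num * 2 + 1] = 0;
		b->num++;
	}

	int main(void)
	{
		struct cmdq_batch cmds;	/* no "= {}": skip zeroing ~1KiB */

		cmds.num = 0;		/* zero only the member read first */
		batch_add(&cmds, 0x3);
		printf("batched %d command(s)\n", cmds.num);
		return 0;
	}
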
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1628696966-88386-1-git-send-email-john.garry@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 2cbeaf3f
@@ -955,7 +955,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	size_t i;
 	unsigned long flags;
 	struct arm_smmu_master *master;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd = {
 		.opcode	= CMDQ_OP_CFGI_CD,
@@ -965,6 +965,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 		},
 	};
 
+	cmds.num = 0;
+
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		for (i = 0; i < master->num_streams; i++) {
@@ -1781,7 +1783,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 	unsigned long flags;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_master *master;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
 		return 0;
@@ -1805,6 +1807,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 
 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
 
+	cmds.num = 0;
+
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		if (!master->ats_enabled)
@@ -1852,7 +1856,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	unsigned long end = iova + size, num_pages = 0, tg = 0;
 	size_t inv_range = granule;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 
 	if (!size)
 		return;
@@ -1870,6 +1874,8 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		num_pages = size >> tg;
 	}
 
+	cmds.num = 0;
+
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
 			/*