@@ -345,6 +345,11 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
 		 FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH) |
 		 FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
 
+	if (q->quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY) {
+		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);
+		return;
+	}
+
 	if (!(smmu->options & ARM_SMMU_OPT_MSIPOLL)) {
 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
 		return;
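
For readers following along outside the driver, the sketch below is a minimal, self-contained userspace model (not driver code) of the CS-field selection that arm_smmu_cmdq_build_sync_cmd() performs after this hunk: a quirky queue always requests CS_NONE, an SMMU without MSI polling falls back to CS_SEV, and everything else uses CS_IRQ. FIELD_PREP is replaced by a plain shift, the structs are trimmed to the fields the logic needs, and names such as pick_cs() and the msipoll flag are invented for the example.

/*
 * Standalone model of the CS-field choice after this patch. The CS field of
 * CMD_SYNC dword 0 lives at bits [13:12] per the SMMUv3 CMD_SYNC layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		0ULL
#define CMDQ_SYNC_0_CS_IRQ		1ULL
#define CMDQ_SYNC_0_CS_SEV		2ULL

#define CMDQ_QUIRK_SYNC_CS_NONE_ONLY	(1U << 0)

struct queue { uint32_t quirks; };
struct smmu  { bool msipoll; };		/* stands in for ARM_SMMU_OPT_MSIPOLL */

static uint64_t pick_cs(const struct smmu *smmu, const struct queue *q)
{
	/* Quirky queues can only ask for CS_NONE; completion must be polled. */
	if (q->quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY)
		return CMDQ_SYNC_0_CS_NONE << CMDQ_SYNC_0_CS_SHIFT;

	/* No MSI-based completion: use SEV so waiters can use WFE. */
	if (!smmu->msipoll)
		return CMDQ_SYNC_0_CS_SEV << CMDQ_SYNC_0_CS_SHIFT;

	/* Otherwise signal completion with an MSI write (CS_IRQ). */
	return CMDQ_SYNC_0_CS_IRQ << CMDQ_SYNC_0_CS_SHIFT;
}

int main(void)
{
	struct smmu s = { .msipoll = true };
	struct queue q = { .quirks = CMDQ_QUIRK_SYNC_CS_NONE_ONLY };

	printf("CS bits: %#llx\n", (unsigned long long)pick_cs(&s, &q));
	return 0;
}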
@@ -690,7 +695,8 @@ static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
 					 struct arm_smmu_cmdq *cmdq,
 					 struct arm_smmu_ll_queue *llq)
 {
-	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
+	if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
+	    !(cmdq->q.quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY))
 		return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
 
 	return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
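
Similarly, here is a small standalone model of the wait-path dispatch in arm_smmu_cmdq_poll_until_sync() after this hunk: a queue flagged CMDQ_QUIRK_SYNC_CS_NONE_ONLY falls back to polling the consumer index even when the SMMU advertises MSI polling, since its CMD_SYNC cannot raise an MSI. The poll helpers are stubbed and the names outside the quirk macro are illustrative.

#include <stdint.h>
#include <stdio.h>

#define OPT_MSIPOLL			(1U << 0)	/* stands in for ARM_SMMU_OPT_MSIPOLL */
#define CMDQ_QUIRK_SYNC_CS_NONE_ONLY	(1U << 0)

struct cmdq { uint32_t quirks; };

static int poll_until_msi(void)      { puts("wait for MSI write"); return 0; }
static int poll_until_consumed(void) { puts("poll CONS register"); return 0; }

static int poll_until_sync(uint32_t options, const struct cmdq *cmdq)
{
	/*
	 * '&' binds tighter than '&&', so this reads as
	 * (options & OPT_MSIPOLL) && !(cmdq->quirks & ...), as in the patch.
	 */
	if (options & OPT_MSIPOLL &&
	    !(cmdq->quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY))
		return poll_until_msi();

	return poll_until_consumed();
}

int main(void)
{
	struct cmdq quirky = { .quirks = CMDQ_QUIRK_SYNC_CS_NONE_ONLY };
	struct cmdq normal = { .quirks = 0 };

	poll_until_sync(OPT_MSIPOLL, &quirky);	/* -> poll CONS register */
	poll_until_sync(OPT_MSIPOLL, &normal);	/* -> wait for MSI write */
	return 0;
}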
@@ -543,6 +543,9 @@ struct arm_smmu_queue {
 
 	u32 __iomem		*prod_reg;
 	u32 __iomem		*cons_reg;
+
+#define CMDQ_QUIRK_SYNC_CS_NONE_ONLY	BIT(0)	/* CMD_SYNC CS field supports CS_NONE only */
+	u32			quirks;
 };
 
 struct arm_smmu_queue_poll {
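
This excerpt does not show where the new quirks field gets populated; presumably another patch in the series flags command queues whose CMD_SYNC CS field can only encode CS_NONE. Purely as a hypothetical illustration (the helper name, the reduced struct, and the detection flag are invented for the example), setting the bit might look like the self-contained sketch below.

#include <stdbool.h>
#include <stdint.h>

#define CMDQ_QUIRK_SYNC_CS_NONE_ONLY	(1U << 0)	/* BIT(0) in the header */

/* Only the field this example touches; the real arm_smmu_queue has many more. */
struct queue {
	uint32_t quirks;
};

/* Hypothetical init-time hook: mark a queue whose CMD_SYNC supports CS_NONE only. */
static void flag_cs_none_only(struct queue *q, bool cs_none_only)
{
	if (cs_none_only)
		q->quirks |= CMDQ_QUIRK_SYNC_CS_NONE_ONLY;
}

int main(void)
{
	struct queue q = { 0 };

	flag_cs_none_only(&q, true);
	return q.quirks == CMDQ_QUIRK_SYNC_CS_NONE_ONLY ? 0 : 1;
}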