On 2/27/26 3:47 PM, Mostafa Saleh wrote:
> On Sat, Feb 21, 2026 at 06:17:06PM +0800, Tao Tang wrote:
>> Extend IOMMU notifier functions to be SEC_SID-aware, ensuring that
>> invalidation notifications are dispatched to the correct security-aware
>> address space.
>>
>> This ensures IOMMU notifier events (e.g., to VFIO) are correctly routed
>> to the appropriate security world's address space, maintaining isolation
>> between secure and non-secure DMA operations.
> I don’t think that VFIO cares about the secure regime; my guess is to
> keep the functions as they are and not call them for secure invalidations.
So do we want to prevent VFIO devices from working in secure mode along
with the SMMU?
Eric
>
> Thanks,
> Mostafa
>
>> Signed-off-by: Tao Tang <[email protected]>
>> ---
>> hw/arm/smmuv3.c | 18 +++++++++++-------
>> 1 file changed, 11 insertions(+), 7 deletions(-)
>>
>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>> index 4a4de719a7c..e33a7babd1c 100644
>> --- a/hw/arm/smmuv3.c
>> +++ b/hw/arm/smmuv3.c
>> @@ -1195,15 +1195,16 @@ epilogue:
>> * @tg: translation granule (if communicated through range invalidation)
>> * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
>> * @stage: Which stage(1 or 2) is used
>> + * @sec_sid: StreamID Security state
>> */
>> static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
>> IOMMUNotifier *n,
>> int asid, int vmid,
>> dma_addr_t iova, uint8_t tg,
>> - uint64_t num_pages, int stage)
>> + uint64_t num_pages, int stage,
>> + SMMUSecSID sec_sid)
>> {
>> SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
>> - SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>> SMMUEventInfo eventinfo = {.sec_sid = sec_sid,
>> .inval_ste_allowed = true};
>> SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo, sec_sid);
>> @@ -1251,7 +1252,8 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
>> }
>>
>> event.type = IOMMU_NOTIFIER_UNMAP;
>> - event.entry.target_as = &address_space_memory;
>> + event.entry.target_as = smmu_get_address_space(sdev->smmu, sec_sid);
>> + g_assert(event.entry.target_as);
>> event.entry.iova = iova;
>> event.entry.addr_mask = num_pages * (1 << granule) - 1;
>> event.entry.perm = IOMMU_NONE;
>> @@ -1262,7 +1264,8 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
>> /* invalidate an asid/vmid/iova range tuple in all mr's */
>> static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
>> dma_addr_t iova, uint8_t tg,
>> - uint64_t num_pages, int stage)
>> + uint64_t num_pages, int stage,
>> + SMMUSecSID sec_sid)
>> {
>> SMMUDevice *sdev;
>>
>> @@ -1274,7 +1277,8 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
>> iova, tg, num_pages, stage);
>>
>> IOMMU_NOTIFIER_FOREACH(n, mr) {
>> - smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages, stage);
>> + smmuv3_notify_iova(mr, n, asid, vmid, iova, tg,
>> + num_pages, stage, sec_sid);
>> }
>> }
>> }
>> @@ -1307,7 +1311,7 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage,
>> if (!tg) {
>> trace_smmuv3_range_inval(sec_sid, vmid, asid, addr,
>> tg, 1, ttl, leaf, stage);
>> - smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1, stage);
>> + smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1, stage, sec_sid);
>> if (stage == SMMU_STAGE_1) {
>> smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl, sec_sid);
>> } else {
>> @@ -1331,7 +1335,7 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage,
>> trace_smmuv3_range_inval(sec_sid, vmid, asid, addr, tg,
>> num_pages, ttl, leaf, stage);
>> smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg,
>> - num_pages, stage);
>> + num_pages, stage, sec_sid);
>> if (stage == SMMU_STAGE_1) {
>> smmu_iotlb_inv_iova(s, asid, vmid, addr, tg,
>> num_pages, ttl, sec_sid);
>> --
>> 2.34.1
>>