From: Lu Baolu
To: Joerg Roedel, Will Deacon, Robin Murphy, Jason Gunthorpe, Kevin Tian
Cc: Yi Liu, David Airlie, Daniel Vetter, Kalle Valo, Bjorn Andersson,
	Mathieu Poirier, Alex Williamson, mst@redhat.com, Jason Wang,
	Thierry Reding, Jonathan Hunter, Mikko Perttunen,
	iommu@lists.linux.dev, dri-devel@lists.freedesktop.org,
	linux-kernel@vger.kernel.org, Lu Baolu
Subject: [PATCH v2 16/22] iommu/vt-d: Add helper to allocate paging domain
Date: Tue, 4 Jun 2024 09:51:28 +0800
Message-Id: <20240604015134.164206-17-baolu.lu@linux.intel.com>
In-Reply-To: <20240604015134.164206-1-baolu.lu@linux.intel.com>
References: <20240604015134.164206-1-baolu.lu@linux.intel.com>

The domain_alloc_user operation is currently implemented by allocating a
paging domain with iommu_domain_alloc(), because the domain must be fully
initialized before it is returned. Add a helper that performs this
initialization directly, so that iommu_domain_alloc() is no longer needed
here.

Signed-off-by: Lu Baolu
---
 drivers/iommu/intel/iommu.c | 87 +++++++++++++++++++++++++++++++++----
 1 file changed, 78 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..ccde5f5972e4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3633,6 +3633,79 @@ static struct iommu_domain blocking_domain = {
 	}
 };
 
+static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
+{
+	if (!intel_iommu_superpage)
+		return 0;
+
+	if (first_stage)
+		return cap_fl1gp_support(iommu->cap) ? 2 : 1;
+
+	return fls(cap_super_page_val(iommu->cap));
+}
+
+static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct dmar_domain *domain;
+	int addr_width;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&domain->devices);
+	INIT_LIST_HEAD(&domain->dev_pasids);
+	INIT_LIST_HEAD(&domain->cache_tags);
+	spin_lock_init(&domain->lock);
+	spin_lock_init(&domain->cache_lock);
+	xa_init(&domain->iommu_array);
+
+	domain->nid = dev_to_node(dev);
+	domain->has_iotlb_device = info->ats_enabled;
+	domain->use_first_level = first_stage;
+
+	/* calculate the address width */
+	addr_width = agaw_to_width(iommu->agaw);
+	if (addr_width > cap_mgaw(iommu->cap))
+		addr_width = cap_mgaw(iommu->cap);
+	domain->gaw = addr_width;
+	domain->agaw = iommu->agaw;
+	domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
+
+	/* iommu memory access coherency */
+	domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
+
+	/* pagesize bitmap */
+	domain->domain.pgsize_bitmap = SZ_4K;
+	domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
+	domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
+
+	/*
+	 * IOVA aperture: First-level translation restricts the input-address
+	 * to a canonical address (i.e., address bits 63:N have the same value
+	 * as address bit [N-1], where N is 48-bits with 4-level paging and
+	 * 57-bits with 5-level paging). Hence, skip bit [N-1].
+	 */
+	domain->domain.geometry.force_aperture = true;
+	domain->domain.geometry.aperture_start = 0;
+	if (first_stage)
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
+	else
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+	/* always allocate the top pgd */
+	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+	if (!domain->pgd) {
+		kfree(domain);
+		return ERR_PTR(-ENOMEM);
+	}
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+
+	return domain;
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -3695,15 +3768,11 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	if (user_data || (dirty_tracking && !ssads_supported(iommu)))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	/*
-	 * domain_alloc_user op needs to fully initialize a domain before
-	 * return, so uses iommu_domain_alloc() here for simple.
-	 */
-	domain = iommu_domain_alloc(dev->bus);
-	if (!domain)
-		return ERR_PTR(-ENOMEM);
-
-	dmar_domain = to_dmar_domain(domain);
+	/* Do not use first stage for user domain translation. */
+	dmar_domain = paging_domain_alloc(dev, false);
+	if (IS_ERR(dmar_domain))
+		return ERR_CAST(dmar_domain);
+	domain = &dmar_domain->domain;
 
 	if (nested_parent) {
 		dmar_domain->nested_parent = true;
-- 
2.34.1
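
For readers skimming the series, a minimal caller sketch (not part of the
patch, illustration only): it mirrors the intel_iommu_domain_alloc_user()
hunk above and assumes the driver-internal paging_domain_alloc() and
struct dmar_domain introduced by this patch; the function name
example_user_domain_alloc() is hypothetical.

	/*
	 * Illustrative sketch only: how a domain_alloc_user-style path
	 * consumes the new helper. Relies on driver-internal symbols from
	 * drivers/iommu/intel/iommu.c as modified by this patch.
	 */
	static struct iommu_domain *example_user_domain_alloc(struct device *dev)
	{
		struct dmar_domain *dmar_domain;

		/* Second stage only; user domains skip first-stage translation. */
		dmar_domain = paging_domain_alloc(dev, false);
		if (IS_ERR(dmar_domain))
			return ERR_CAST(dmar_domain);

		/* The embedded struct iommu_domain is what the iommu core uses. */
		return &dmar_domain->domain;
	}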