@@ -1175,6 +1175,17 @@ static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
 	return phys;
 }
 
+/*
+ * Checks if a physical buffer has unaligned boundaries with respect to
+ * the IOMMU granule. Returns non-zero if either the start or end
+ * address is not aligned to the granule boundary.
+ */
+static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
+		size_t size)
+{
+	return iova_offset(iovad, phys | size);
+}
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
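
The `phys | size` trick in the helper folds two alignment checks into one: `iova_offset()` masks out everything above the granule, and the OR of start address and length carries low bits exactly when the start, the length, or therefore the end (`phys + size`) is misaligned. A minimal userspace model of the check, assuming a 4 KiB granule (`mock_iova_offset()` is illustrative, not the kernel helper):

#include <assert.h>
#include <stdint.h>

#define GRANULE 4096u	/* assumed IOMMU page granule */

/* Models iova_offset(): the offset of x below the granule boundary. */
static uint64_t mock_iova_offset(uint64_t x)
{
	return x & (GRANULE - 1);
}

int main(void)
{
	/* Aligned start and aligned size: no low bits set, no bouncing. */
	assert(mock_iova_offset(0x10000 | 0x2000) == 0);
	/* An unaligned start sets low bits in the OR... */
	assert(mock_iova_offset(0x10100 | 0x2000) != 0);
	/* ...and so does an unaligned size (hence an unaligned end). */
	assert(mock_iova_offset(0x10000 | 0x2100) != 0);
	return 0;
}
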
@@ -1192,7 +1203,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	 * we don't need to use a bounce page.
 	 */
 	if (dev_use_swiotlb(dev, size, dir) &&
-	    iova_offset(iovad, phys | size)) {
+	    iova_unaligned(iovad, phys, size)) {
 		phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
 		if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;
@@ -1818,6 +1829,268 @@ void dma_iova_free(struct device *dev, struct dma_iova_state *state)
 }
 EXPORT_SYMBOL_GPL(dma_iova_free);
 
+static int __dma_iova_link(struct device *dev, dma_addr_t addr,
+		phys_addr_t phys, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	bool coherent = dev_is_dma_coherent(dev);
+
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, size, dir);
+
+	return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
+			dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+}
+
+static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
+		phys_addr_t phys, size_t bounce_len,
+		enum dma_data_direction dir, unsigned long attrs,
+		size_t iova_start_pad)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iova_domain *iovad = &domain->iova_cookie->iovad;
+	phys_addr_t bounce_phys;
+	int error;
+
+	bounce_phys = iommu_dma_map_swiotlb(dev, phys, bounce_len, dir, attrs);
+	if (bounce_phys == DMA_MAPPING_ERROR)
+		return -ENOMEM;
+
+	error = __dma_iova_link(dev, addr - iova_start_pad,
+			bounce_phys - iova_start_pad,
+			iova_align(iovad, bounce_len), dir, attrs);
+	if (error)
+		swiotlb_tbl_unmap_single(dev, bounce_phys, bounce_len, dir,
+				attrs);
+	return error;
+}
+
+static int iommu_dma_iova_link_swiotlb(struct device *dev,
+		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_start_pad = iova_offset(iovad, phys);
+	size_t iova_end_pad = iova_offset(iovad, phys + size);
+	dma_addr_t addr = state->addr + offset;
+	size_t mapped = 0;
+	int error;
+
+	if (iova_start_pad) {
+		size_t bounce_len = min(size, iovad->granule - iova_start_pad);
+
+		error = iommu_dma_iova_bounce_and_link(dev, addr, phys,
+				bounce_len, dir, attrs, iova_start_pad);
+		if (error)
+			return error;
+		state->__size |= DMA_IOVA_USE_SWIOTLB;
+
+		mapped += bounce_len;
+		size -= bounce_len;
+		if (!size)
+			return 0;
+	}
+
+	size -= iova_end_pad;
+	error = __dma_iova_link(dev, addr + mapped, phys + mapped, size, dir,
+			attrs);
+	if (error)
+		goto out_unmap;
+	mapped += size;
+
+	if (iova_end_pad) {
+		error = iommu_dma_iova_bounce_and_link(dev, addr + mapped,
+				phys + mapped, iova_end_pad, dir, attrs, 0);
+		if (error)
+			goto out_unmap;
+		state->__size |= DMA_IOVA_USE_SWIOTLB;
+	}
+
+	return 0;
+
+out_unmap:
+	dma_iova_unlink(dev, state, 0, mapped, dir, attrs);
+	return error;
+}
+
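
iommu_dma_iova_link_swiotlb() splits an unaligned buffer into at most three pieces: an unaligned head bounced through SWIOTLB up to the first granule boundary, a granule-aligned middle linked directly, and an unaligned tail bounced again. A runnable userspace sketch of that arithmetic, assuming a 4 KiB granule and a buffer long enough that the kernel's min() clamp on the head does not kick in:

#include <stdio.h>
#include <stdint.h>

#define GRANULE 4096ull	/* assumed IOMMU page granule */

int main(void)
{
	uint64_t phys = 0x10100;	/* unaligned start */
	uint64_t size = 0x2f80;		/* unaligned end, too */

	uint64_t start_pad = phys & (GRANULE - 1);		/* 0x100 */
	uint64_t end_pad = (phys + size) & (GRANULE - 1);	/* 0x080 */

	/* Head: bounced, runs up to the first granule boundary. */
	uint64_t head = GRANULE - start_pad;			/* 0xf00 */
	/* Middle: granule-aligned on both sides, linked directly. */
	uint64_t middle = size - head - end_pad;		/* 0x2000 */

	printf("head=%#llx middle=%#llx tail=%#llx\n",
	       (unsigned long long)head, (unsigned long long)middle,
	       (unsigned long long)end_pad);
	return 0;
}
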
+/**
+ * dma_iova_link - Link a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @offset: offset into the IOVA state to map into
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Link a range of IOVA space for the given IOVA state without IOTLB sync.
+ * This function is used to link multiple physical addresses in contiguous
+ * IOVA space without performing costly IOTLB sync.
+ *
+ * The caller is responsible for calling dma_iova_sync() to sync the IOTLB at
+ * the end of linkage.
+ */
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+		phys_addr_t phys, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_start_pad = iova_offset(iovad, phys);
+
+	if (WARN_ON_ONCE(iova_start_pad && offset > 0))
+		return -EIO;
+
+	if (dev_use_swiotlb(dev, size, dir) &&
+	    iova_unaligned(iovad, phys, size))
+		return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
+				size, dir, attrs);
+
+	return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
+			phys - iova_start_pad,
+			iova_align(iovad, size + iova_start_pad), dir, attrs);
+}
+EXPORT_SYMBOL_GPL(dma_iova_link);
+
+/**
+ * dma_iova_sync - Sync IOTLB
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to sync
+ * @size: size of the buffer
+ *
+ * Sync IOTLB for the given IOVA state. This function should be called on
+ * the IOVA-contiguous range created by one or more dma_iova_link() calls
+ * to sync the IOTLB.
+ */
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+		size_t offset, size_t size)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	dma_addr_t addr = state->addr + offset;
+	size_t iova_start_pad = iova_offset(iovad, addr);
+
+	return iommu_sync_map(domain, addr - iova_start_pad,
+			iova_align(iovad, size + iova_start_pad));
+}
+EXPORT_SYMBOL_GPL(dma_iova_sync);
+
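
dma_iova_link() and dma_iova_sync() above combine into the intended calling pattern: reserve a contiguous IOVA range, link each physical chunk into it, and sync the IOTLB once at the end. A sketch of a hypothetical caller, assuming the dma_iova_try_alloc() entry point from the same API family; error handling is abbreviated and the non-IOMMU fallback is elided:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map two discontiguous chunks of @len
 * bytes each into one contiguous IOVA range, syncing the IOTLB once.
 */
static int example_map_chunks(struct device *dev, struct dma_iova_state *state,
		phys_addr_t chunk0, phys_addr_t chunk1, size_t len)
{
	size_t mapped = 0;
	int ret;

	/* Reserve IOVA space; fails if the device cannot use this path. */
	if (!dma_iova_try_alloc(dev, state, chunk0, 2 * len))
		return -EOPNOTSUPP;	/* caller falls back to dma_map_page() */

	ret = dma_iova_link(dev, state, chunk0, 0, len, DMA_TO_DEVICE, 0);
	if (ret)
		goto err_destroy;
	mapped += len;

	ret = dma_iova_link(dev, state, chunk1, mapped, len, DMA_TO_DEVICE, 0);
	if (ret)
		goto err_destroy;
	mapped += len;

	/* One IOTLB sync covers everything linked above. */
	ret = dma_iova_sync(dev, state, 0, mapped);
	if (ret)
		goto err_destroy;

	/* The device may now DMA to state->addr .. state->addr + mapped. */
	return 0;

err_destroy:
	dma_iova_destroy(dev, state, mapped, DMA_TO_DEVICE, 0);
	return ret;
}
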
+static void iommu_dma_iova_unlink_range_slow(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_start_pad = iova_offset(iovad, addr);
+	dma_addr_t end = addr + size;
+
+	do {
+		phys_addr_t phys;
+		size_t len;
+
+		phys = iommu_iova_to_phys(domain, addr);
+		if (WARN_ON(!phys))
+			/* Something very horrible happened here */
+			return;
+
+		len = min_t(size_t,
+			    end - addr, iovad->granule - iova_start_pad);
+
+		if (!dev_is_dma_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			arch_sync_dma_for_cpu(phys, len, dir);
+
+		swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
+
+		addr += len;
+		iova_start_pad = 0;
+	} while (addr < end);
+}
+
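
The slow path walks the range one granule-sized chunk at a time, since each bounced chunk may sit in a different SWIOTLB slot; only the first chunk can start mid-granule, which is why iova_start_pad is zeroed after the first pass. The same loop shape as a runnable userspace sketch (illustrative names, 4 KiB granule assumed):

#include <stdio.h>
#include <stdint.h>

#define GRANULE 4096ull	/* assumed IOMMU page granule */

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	uint64_t addr = 0x20100;	/* unaligned start */
	uint64_t end = addr + 0x2f80;
	uint64_t start_pad = addr & (GRANULE - 1);

	do {
		/* Chunk ends at the next granule boundary or the range end. */
		uint64_t len = min_u64(end - addr, GRANULE - start_pad);

		printf("chunk at %#llx, len %#llx\n",
		       (unsigned long long)addr, (unsigned long long)len);

		addr += len;
		start_pad = 0;	/* later chunks start on a boundary */
	} while (addr < end);
	return 0;
}
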
+static void __iommu_dma_iova_unlink(struct device *dev,
+		struct dma_iova_state *state, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs,
+		bool free_iova)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	dma_addr_t addr = state->addr + offset;
+	size_t iova_start_pad = iova_offset(iovad, addr);
+	struct iommu_iotlb_gather iotlb_gather;
+	size_t unmapped;
+
+	if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
+	    (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+		iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
+
+	iommu_iotlb_gather_init(&iotlb_gather);
+	iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain);
+
+	size = iova_align(iovad, size + iova_start_pad);
+	addr -= iova_start_pad;
+	unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);
+	WARN_ON(unmapped != size);
+
+	if (!iotlb_gather.queued)
+		iommu_iotlb_sync(domain, &iotlb_gather);
+	if (free_iova)
+		iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
+}
+
+/**
+ * dma_iova_unlink - Unlink a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to unlink
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	__iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
+}
+EXPORT_SYMBOL_GPL(dma_iova_unlink);
+
+/**
+ * dma_iova_destroy - Finish a DMA mapping transaction
+ * @dev: DMA device
+ * @state: IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
+ * range of IOVA from dma_addr to @mapped_len must all be linked, and be the
+ * only linked IOVA in state.
+ */
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+		size_t mapped_len, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	if (mapped_len)
+		__iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs,
+				true);
+	else
+		/*
+		 * We can get here if the first call to dma_iova_link() failed
+		 * and there is nothing to unlink, so let's be more clear.
+		 */
+		dma_iova_free(dev, state);
+}
+EXPORT_SYMBOL_GPL(dma_iova_destroy);
+
 void iommu_setup_dma_ops(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
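
Note the division of labour between the two teardown entry points: dma_iova_unlink() removes mappings but keeps the IOVA allocation alive, while dma_iova_destroy() frees the whole range. The former makes it possible to repoint a granule-aligned subrange at a new buffer without reallocating IOVA space. A hypothetical sketch of that reuse pattern (example_relink() is not kernel API; recall that dma_iova_link() at a non-zero offset requires a granule-aligned physical address):

#include <linux/dma-mapping.h>

/* Hypothetical: replace the buffer backing one aligned subrange of an
 * already-linked IOVA state, keeping the IOVA allocation alive.
 */
static int example_relink(struct device *dev, struct dma_iova_state *state,
		size_t off, size_t len, phys_addr_t new_phys)
{
	int ret;

	dma_iova_unlink(dev, state, off, len, DMA_FROM_DEVICE, 0);

	ret = dma_iova_link(dev, state, new_phys, off, len,
			DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	/* Sync only the IOTLB entries for the re-linked subrange. */
	return dma_iova_sync(dev, state, off, len);
}
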