From 1a3c7d0841ae24d28a15bbf87ee7d08614cec957 Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Sat, 11 Jun 2022 01:25:11 -0700 Subject: swiotlb: remove the unused swiotlb_force declaration The 'swiotlb_force' variable was removed by commit c6af2aa9ffc9 ("swiotlb: make the swiotlb_init interface more useful"), so drop its now-unused declaration. Signed-off-by: Dongli Zhang Signed-off-by: Christoph Hellwig --- include/linux/swiotlb.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 7ed35dd3de6e..bdc58a0e20b1 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -60,7 +60,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); #ifdef CONFIG_SWIOTLB -extern enum swiotlb_force swiotlb_force; /** * struct io_tlb_mem - IO TLB Memory Pool Descriptor -- cgit v1.2.3 From 20347fca71a387a3751f7bb270062616ddc5317a Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Fri, 8 Jul 2022 12:15:44 -0400 Subject: swiotlb: split up the global swiotlb lock Traditionally swiotlb was not performance critical because it was only used for slow devices. But in some setups, like TDX/SEV confidential guests, all IO has to go through swiotlb. Currently swiotlb only has a single lock. Under high IO load with multiple CPUs this can lead to significant lock contention on the swiotlb lock. This patch splits the swiotlb bounce buffer pool into individual areas which have their own lock. Each CPU tries to allocate in its own area first. Only if that fails does it search other areas. On freeing, the allocation is returned to the area it was originally allocated from. The number of areas can be set via the swiotlb kernel parameter and defaults to the number of possible CPUs. If the possible CPU count is not a power of 2, the area count is rounded up to the next power of 2. This idea comes from a patch by Andi Kleen (https://github.com/intel/tdx/commit/4529b5784c141782c72ec9bd9a92df2b68cb7d45). Based-on-idea-by: Andi Kleen Signed-off-by: Tianyu Lan Signed-off-by: Christoph Hellwig --- include/linux/swiotlb.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index bdc58a0e20b1..f65ff1930120 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -88,6 +88,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, * @late_alloc: %true if allocated using the page allocator * @force_bounce: %true if swiotlb bouncing is forced * @for_alloc: %true if the pool is used for memory allocation + * @nareas: The number of areas in the pool. + * @area_nslabs: The number of slots in each area. */ struct io_tlb_mem { phys_addr_t start; @@ -101,6 +103,9 @@ struct io_tlb_mem { bool late_alloc; bool force_bounce; bool for_alloc; + unsigned int nareas; + unsigned int area_nslabs; + struct io_tlb_area *areas; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; -- cgit v1.2.3 From 91561d4ecb755f056f8ff04f9dcaec210140e55c Mon Sep 17 00:00:00 2001 From: Chao Gao Date: Fri, 15 Jul 2022 18:45:33 +0800 Subject: swiotlb: remove unused fields in io_tlb_mem Commit 20347fca71a3 ("swiotlb: split up the global swiotlb lock") splits io_tlb_mem into multiple areas. Each area has its own lock and index. The global ones are no longer used, so remove them.
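For context, here is a minimal sketch of the per-area scheme that makes the global index and lock redundant (illustrative only: the io_tlb_area fields mirror the description above, while area_find_slots() is an assumed stand-in for the real slot-search helper):

struct io_tlb_area {
	unsigned long used;	/* slots currently in use in this area */
	unsigned int index;	/* next slot index to try in this area */
	spinlock_t lock;	/* protects only this area */
};

/*
 * Allocation starts in the calling CPU's home area and only falls back
 * to the other areas when that one is full. nareas is a power of 2, so
 * masking the CPU id always picks a valid starting area.
 */
static int swiotlb_area_search_sketch(struct io_tlb_mem *mem, size_t alloc_size)
{
	unsigned int start = raw_smp_processor_id() & (mem->nareas - 1);
	unsigned int i = start;
	int index;

	do {
		index = area_find_slots(mem, i, alloc_size); /* takes areas[i].lock */
		if (index >= 0)
			return index;
		if (++i == mem->nareas)
			i = 0;
	} while (i != start);

	return -1;	/* all areas exhausted */
}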
Signed-off-by: Chao Gao Signed-off-by: Christoph Hellwig --- include/linux/swiotlb.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index f65ff1930120..d3ae03edbbd2 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -79,11 +79,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, * @used: The number of used IO TLB block. * @list: The free list describing the number of free entries available * from each index. - * @index: The index to start searching in the next round. * @orig_addr: The original address corresponding to a mapped entry. * @alloc_size: Size of the allocated buffer. - * @lock: The lock to protect the above data structures in the map and - * unmap calls. * @debugfs: The dentry to debugfs. * @late_alloc: %true if allocated using the page allocator * @force_bounce: %true if swiotlb bouncing is forced @@ -97,8 +94,6 @@ struct io_tlb_mem { void *vaddr; unsigned long nslabs; unsigned long used; - unsigned int index; - spinlock_t lock; struct dentry *debugfs; bool late_alloc; bool force_bounce; -- cgit v1.2.3 From 942a8186eb4451700dadd1d60a2e43ce67605991 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Jul 2022 08:43:07 +0200 Subject: swiotlb: move struct io_tlb_slot to swiotlb.c No need to expose this structure definition in the header. Signed-off-by: Christoph Hellwig --- include/linux/swiotlb.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index d3ae03edbbd2..35bc4e281c21 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -101,11 +101,7 @@ struct io_tlb_mem { unsigned int nareas; unsigned int area_nslabs; struct io_tlb_area *areas; - struct io_tlb_slot { - phys_addr_t orig_addr; - size_t alloc_size; - unsigned int list; - } *slots; + struct io_tlb_slot *slots; }; extern struct io_tlb_mem io_tlb_default_mem; -- cgit v1.2.3 From a229cc14f3395311b899e5e582b71efa8dd01df0 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 14 Jul 2022 19:15:24 +0800 Subject: dma-mapping: add dma_opt_mapping_size() Streaming DMA mappings involving an IOMMU may be much slower for larger total mapping sizes. This is because every IOMMU DMA mapping requires an IOVA to be allocated and freed. IOVA sizes above a certain limit are not cached, which can have a big impact on DMA mapping performance. Provide an API for device drivers to know this "optimal" limit, such that they may try to produce mappings which don't exceed it.
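As a hedged sketch of how a driver might consume the new API (my_driver_max_sectors() and the sector conversion are illustrative, not from this patch; only dma_opt_mapping_size() is the new interface):

/*
 * Cap a driver's transfer size at the DMA-optimal mapping limit.
 * dma_opt_mapping_size() returns 0 when DMA is not supported at all
 * and may return SIZE_MAX when no useful limit exists.
 */
static unsigned int my_driver_max_sectors(struct device *dma_dev,
					  unsigned int hw_max_sectors)
{
	size_t opt = dma_opt_mapping_size(dma_dev);

	if (opt == 0 || opt == SIZE_MAX)
		return hw_max_sectors;

	/* SECTOR_SHIFT == 9: convert the byte limit to 512-byte sectors. */
	return min_t(unsigned int, hw_max_sectors, opt >> SECTOR_SHIFT);
}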
Signed-off-by: John Garry Reviewed-by: Damien Le Moal Acked-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- include/linux/dma-map-ops.h | 1 + include/linux/dma-mapping.h | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0d5b06b3a4a6..98ceba6fa848 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -69,6 +69,7 @@ struct dma_map_ops { int (*dma_supported)(struct device *dev, u64 mask); u64 (*get_required_mask)(struct device *dev); size_t (*max_mapping_size)(struct device *dev); + size_t (*opt_mapping_size)(void); unsigned long (*get_merge_boundary)(struct device *dev); }; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dca2b1355bb1..fe3849434b2a 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -144,6 +144,7 @@ int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +size_t dma_opt_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, @@ -266,6 +267,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline size_t dma_opt_mapping_size(struct device *dev) +{ + return 0; +} static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { return false; -- cgit v1.2.3 From 6d9870b7e5def2450e21316515b9efc0529204dd Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 14 Jul 2022 19:15:25 +0800 Subject: dma-iommu: add iommu_dma_opt_mapping_size() Add the IOMMU callback for DMA mapping API dma_opt_mapping_size(), which allows drivers to know the optimal mapping limit and thus limit the requested IOVA lengths. This value is based on the IOVA rcache range limit, as IOVAs allocated above this limit must always be newly allocated, which may be quite slow. Signed-off-by: John Garry Reviewed-by: Damien Le Moal Acked-by: Robin Murphy Acked-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- include/linux/iova.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/iova.h b/include/linux/iova.h index 320a70e40233..c6ba6d95d79c 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -79,6 +79,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) int iova_cache_get(void); void iova_cache_put(void); +unsigned long iova_rcache_range(void); + void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, -- cgit v1.2.3 From 608128d391fa5c95d515d2c198f40239016e1fcd Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 14 Jul 2022 19:15:27 +0800 Subject: scsi: sd: allow max_sectors be capped at DMA optimal size limit Streaming DMA mappings may be considerably slower when mappings go through an IOMMU and the total mapping length is somewhat long. This is because the IOMMU IOVA code allocates and frees an IOVA for each mapping, which may affect performance. A new member, Scsi_Host.opt_sectors, is added, which is the optimal host max_sectors; use this value to cap the request queue max_sectors when it is set.
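A minimal sketch of the capping rule just described (simplified; the helper name and parameters are illustrative, not the exact sd.c hunk):

static unsigned int cap_max_sectors_sketch(struct Scsi_Host *shost,
					   unsigned int rw_max,
					   unsigned int max_hw_sectors)
{
	/* Apply the DMA-optimal cap only when the host actually set one. */
	if (shost->opt_sectors)
		rw_max = min(rw_max, shost->opt_sectors);

	/* Never exceed the hardware limit, for safety. */
	return min(rw_max, max_hw_sectors);
}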
One could consider initially setting the request queue's io_opt value to Scsi_Host.opt_sectors in __scsi_init_queue(), but that is not really the purpose of io_opt. Finally, even though the Scsi_Host.opt_sectors value should never be greater than the request queue's max_hw_sectors value, continue to limit to that value for safety. Signed-off-by: John Garry Reviewed-by: Damien Le Moal Acked-by: Martin K. Petersen Signed-off-by: Christoph Hellwig --- include/scsi/scsi_host.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 667d889b92b5..d32a84b2bb40 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -607,6 +607,7 @@ struct Scsi_Host { short unsigned int sg_tablesize; short unsigned int sg_prot_tablesize; unsigned int max_sectors; + unsigned int opt_sectors; unsigned int max_segment_size; unsigned long dma_boundary; unsigned long virt_boundary_mask; -- cgit v1.2.3 From 42399301203e3cddef36cde457228f9247618313 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:52 -0600 Subject: lib/scatterlist: add flag for indicating P2PDMA segments in an SGL Introduce a dma_flags field in struct scatterlist. These flags will be used by dma_[un]map_sg_p2pdma() to determine when a given SGL segment's dma_address points to a PCI bus address. dma_unmap_sg_p2pdma() will need to perform different cleanup when a segment is marked as a bus address. The dma_flags field will fit in the existing padding on 64-bit systems (assuming CONFIG_NEED_SG_DMA_LENGTH is also set). The new bit will only be used when CONFIG_PCI_P2PDMA is set; this means PCI P2PDMA will require CONFIG_64BIT. This should be acceptable as the majority of P2PDMA use cases are restricted to newer root complexes, which roughly require the extra address space for the memory BARs used in the transactions. Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- include/linux/scatterlist.h | 69 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) (limited to 'include') diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 7ff9d6386c12..375a5e90d86a 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -16,6 +16,9 @@ struct scatterlist { #ifdef CONFIG_NEED_SG_DMA_LENGTH unsigned int dma_length; #endif +#ifdef CONFIG_PCI_P2PDMA + unsigned int dma_flags; +#endif }; /* @@ -245,6 +248,72 @@ static inline void sg_unmark_end(struct scatterlist *sg) sg->page_link &= ~SG_END; } +/* + * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT, which means there are 4 bytes + * in struct scatterlist (assuming CONFIG_NEED_SG_DMA_LENGTH is also set). + * Use this padding for DMA flag bits to indicate when a specific + * dma address is a bus address. + */ +#ifdef CONFIG_PCI_P2PDMA + +#define SG_DMA_BUS_ADDRESS (1 << 0) + +/** + * sg_is_dma_bus_address - Return whether a given segment was marked + * as a bus address + * @sg: SG entry + * + * Description: + * Returns true if sg_dma_mark_bus_address() has been called on + * this segment. + **/ +static inline bool sg_is_dma_bus_address(struct scatterlist *sg) +{ + return sg->dma_flags & SG_DMA_BUS_ADDRESS; +} + +/** + * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address + * @sg: SG entry + * + * Description: + * Marks the passed in sg entry to indicate that the dma_address is + * a bus address and doesn't need to be unmapped.
This should only be + * used by dma_map_sg() implementations to mark bus addresses + * so they can be properly cleaned up in dma_unmap_sg(). + **/ +static inline void sg_dma_mark_bus_address(struct scatterlist *sg) +{ + sg->dma_flags |= SG_DMA_BUS_ADDRESS; +} + +/** + * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address + * @sg: SG entry + * + * Description: + * Clears the bus address mark. + **/ +static inline void sg_dma_unmark_bus_address(struct scatterlist *sg) +{ + sg->dma_flags &= ~SG_DMA_BUS_ADDRESS; +} + +#else + +static inline bool sg_is_dma_bus_address(struct scatterlist *sg) +{ + return false; +} +static inline void sg_dma_mark_bus_address(struct scatterlist *sg) +{ +} +static inline void sg_dma_unmark_bus_address(struct scatterlist *sg) +{ +} + +#endif + /** * sg_phys - Return physical address of an sg entry -- cgit v1.2.3 From 5e180ff326b43bd3fb72892e1083b2707159aa6e Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:54 -0600 Subject: PCI/P2PDMA: Introduce helpers for dma_map_sg implementations Add pci_p2pdma_map_segment() as a helper for dma_map_sg() implementations. It takes a scatterlist segment that must point to a pci_p2pdma struct page and will map it if the mapping requires a bus address. The return value indicates whether the mapping required a bus address or whether the caller still needs to map the segment normally. If the segment should not be mapped, -EREMOTEIO is returned. This helper uses a state structure to track the changes to the pgmap across calls and avoids needing to look up the xarray for every page. The prototype for the helper is added to dma-map-ops.h as it is only useful to dma map implementations and doesn't need to pollute the public pci-p2pdma header. Signed-off-by: Logan Gunthorpe Acked-by: Bjorn Helgaas Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- include/linux/dma-map-ops.h | 53 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'include') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 98ceba6fa848..99cec59dbfcb 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -380,4 +380,57 @@ static inline void debug_dma_dump_mappings(struct device *dev) extern const struct dma_map_ops dma_dummy_ops; +enum pci_p2pdma_map_type { + /* + * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping + * type hasn't been calculated yet. Functions that return this enum + * never return this value. + */ + PCI_P2PDMA_MAP_UNKNOWN = 0, + + /* + * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will + * traverse the host bridge and the host bridge is not in the + * allowlist. DMA Mapping routines should return an error when + * this is returned. + */ + PCI_P2PDMA_MAP_NOT_SUPPORTED, + + /* + * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to + * each other directly through a PCI switch and the transaction will + * not traverse the host bridge. Such a mapping should program + * the DMA engine with PCI bus addresses. + */ + PCI_P2PDMA_MAP_BUS_ADDR, + + /* + * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk + * to each other, but the transaction traverses a host bridge on the + * allowlist. In this case, a normal mapping either with CPU physical + * addresses (in the case of dma-direct) or IOVA addresses (in the + * case of IOMMUs) should be used to program the DMA engine.
+ */ + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +#ifdef CONFIG_PCI_P2PDMA +enum pci_p2pdma_map_type +pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, + struct scatterlist *sg); +#else /* CONFIG_PCI_P2PDMA */ +static inline enum pci_p2pdma_map_type +pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, + struct scatterlist *sg) +{ + return PCI_P2PDMA_MAP_NOT_SUPPORTED; +} +#endif /* CONFIG_PCI_P2PDMA */ + #endif /* _LINUX_DMA_MAP_OPS_H */ -- cgit v1.2.3 From 159bf19270e80b5bc4b13aa88072dcb390b4d297 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:57 -0600 Subject: dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support Add a flags member to the dma_map_ops structure with one flag to indicate support for PCI P2PDMA. Also, add a helper to check if a device supports PCI P2PDMA. Signed-off-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- include/linux/dma-map-ops.h | 10 ++++++++++ include/linux/dma-mapping.h | 5 +++++ 2 files changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 99cec59dbfcb..010df04358aa 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -11,7 +11,17 @@ struct cma; +/* + * Values for struct dma_map_ops.flags: + * + * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can + * handle PCI P2PDMA pages in the map_sg/unmap_sg operation. + */ +#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0) + struct dma_map_ops { + unsigned int flags; + void *(*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index fe3849434b2a..25a30906289d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -140,6 +140,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, unsigned long attrs); bool dma_can_mmap(struct device *dev); int dma_supported(struct device *dev, u64 mask); +bool dma_pci_p2pdma_supported(struct device *dev); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); @@ -251,6 +252,10 @@ static inline int dma_supported(struct device *dev, u64 mask) { return 0; } +static inline bool dma_pci_p2pdma_supported(struct device *dev) +{ + return false; +} static inline int dma_set_mask(struct device *dev, u64 mask) { return -EIO; -- cgit v1.2.3 From 495758bb1a72316094ee854dc51f667ad3db29b5 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:51:02 -0600 Subject: RDMA/core: introduce ib_dma_pci_p2p_dma_supported() Introduce the helper function ib_dma_pci_p2p_dma_supported() to check if a given ib_device can be used in P2PDMA transfers. This ensures the ib_device is not using virt_dma and also that the underlying dma_device supports P2PDMA. Use the new helper in nvme-rdma to replace the existing check for ib_uses_virt_dma(). Adding the dma_pci_p2pdma_supported() check allows switching away from pci_p2pdma_[un]map_sg(). 
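As a hedged illustration of a call site (the wrapper function and its want_p2p flag are invented for the example; only ib_dma_pci_p2p_dma_supported() is the real helper):

/*
 * Illustrative only: decide whether a queue may use P2PDMA memory.
 * The helper returns false for virt-DMA devices (no dma_device) and
 * for dma_devices whose dma_map_ops lack DMA_F_PCI_P2PDMA_SUPPORTED.
 */
static bool my_queue_may_use_p2pdma(struct ib_device *ibdev, bool want_p2p)
{
	if (!want_p2p)
		return false;

	return ib_dma_pci_p2p_dma_supported(ibdev);
}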
Signed-off-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Max Gurtovoy Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- include/rdma/ib_verbs.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9c6317cf80d5..523843d9ed6c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4013,6 +4013,17 @@ static inline bool ib_uses_virt_dma(struct ib_device *dev) return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; } +/* + * Check if an IB device's underlying DMA mapping supports P2PDMA transfers. + */ +static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev) +{ + if (ib_uses_virt_dma(dev)) + return false; + + return dma_pci_p2pdma_supported(dev->dma_device); +} + /** * ib_dma_mapping_error - check a DMA addr for error * @dev: The device for which the dma_addr was created -- cgit v1.2.3 From 0d06132fc84bd91379300768c75a19474e06d0c5 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:51:04 -0600 Subject: PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg() This interface is superseded by support in dma_map_sg() which now supports heterogeneous scatterlists. There are no longer any users, so remove it. Signed-off-by: Logan Gunthorpe Acked-by: Bjorn Helgaas Reviewed-by: Jason Gunthorpe Reviewed-by: Max Gurtovoy Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- include/linux/pci-p2pdma.h | 27 --------------------------- 1 file changed, 27 deletions(-) (limited to 'include') diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index 8318a97c9c61..2c07aa6b7665 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); -int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs); -void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, @@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { } -static inline int pci_p2pdma_map_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} -static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { @@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } -static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); -} - -static inline void pci_p2pdma_unmap_sg(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir) -{ - pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); -} - #endif /*
_LINUX_PCI_P2P_H */ -- cgit v1.2.3
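Taken together, this series leaves drivers with the ordinary DMA API for scatterlists that may contain P2PDMA pages. A hedged sketch of the resulting calling pattern (the wrapper and its error policy are illustrative, not from any of these patches; only dma_pci_p2pdma_supported() and dma_map_sg() are the real interfaces):

/*
 * Sketch: map a scatterlist that may contain P2PDMA pages with plain
 * dma_map_sg()/dma_unmap_sg(), now that pci_p2pdma_map_sg() is gone.
 */
static int map_sgl_maybe_p2p(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction dir)
{
	int mapped;

	/* Only attempt this when the DMA layer advertises P2PDMA support. */
	if (!dma_pci_p2pdma_supported(dev))
		return -EOPNOTSUPP;

	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (mapped == 0)
		return -EIO;	/* dma_map_sg() returns 0 on failure */

	return mapped;	/* unmap later with dma_unmap_sg(dev, sgl, nents, dir) */
}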