drm/xe: Support PCIe P2P DMA as a fast interconnect

Mimic the dma-buf method, using dma_[map|unmap]_resource() to map
for PCIe P2P DMA.
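
For reference, the map/unmap pattern boils down to the following
minimal sketch (bus_addr, order and dir stand in for the real
arguments; error handling trimmed):

	/* Map a peer device's BAR range for DMA; no struct page needed. */
	dma_addr_t addr = dma_map_resource(dev, bus_addr,
					   PAGE_SIZE << order, dir,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -EFAULT;
	/* ... perform the DMA ... */
	dma_unmap_resource(dev, addr, PAGE_SIZE << order, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);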

There is ongoing work upstream to sort out how this should best be
done. One proposed method is to add an additional pci_p2p_dma_pagemap,
aliasing the device_private pagemap, and to use the corresponding
pci_p2p_dma_pagemap page as input for dma_map_page(). However, that
would double the memory and latency needed to set up the drm_pagemap,
and given the huge amounts of memory present on modern GPUs, that
would really not work. Hence the simple approach used in this patch.
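
For comparison, that rejected alternative would roughly amount to
registering a second, MEMORY_DEVICE_PCI_P2PDMA pagemap over the same
VRAM BAR (sketch only; bar and size are placeholders), paying for a
second struct page array covering all of VRAM:

	/* Alias the VRAM BAR with a P2PDMA pagemap; not done here. */
	err = pci_p2pdma_add_resource(pdev, bar, size, 0);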

v2:
- Simplify xe_page_to_pcie(). (Matt Brost)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251219113320.183860-17-thomas.hellstrom@linux.intel.com
commit 2df55d9e66 (parent dff547e137)
Author: Thomas Hellström
Date:   2025-12-19 12:33:12 +01:00
2 changed files with 32 additions and 3 deletions

--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c

@@ -3,6 +3,8 @@
  * Copyright © 2024 Intel Corporation
  */
 
+#include <linux/pci-p2pdma.h>
+
 #include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_pagemap.h>
@@ -441,6 +443,14 @@ static u64 xe_page_to_dpa(struct page *page)
 	return dpa;
 }
 
+static u64 xe_page_to_pcie(struct page *page)
+{
+	struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
+	struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
+
+	return xe_page_to_dpa(page) - vr->dpa_base + vr->io_start;
+}
+
 enum xe_svm_copy_dir {
 	XE_SVM_COPY_TO_VRAM,
 	XE_SVM_COPY_TO_SRAM,
@@ -804,7 +814,10 @@ static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
 	struct device *dev1 = xe_peer_to_dev(peer1);
 	struct device *dev2 = xe_peer_to_dev(peer2);
 
-	return dev1 == dev2;
+	if (dev1 == dev2)
+		return true;
+
+	return pci_p2pdma_distance(to_pci_dev(dev1), dev2, true) >= 0;
 }
 
 static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);
@@ -1577,13 +1590,27 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
 		addr = xe_page_to_dpa(page);
 		prot = XE_INTERCONNECT_VRAM;
 	} else {
-		addr = DMA_MAPPING_ERROR;
-		prot = 0;
+		addr = dma_map_resource(dev,
+					xe_page_to_pcie(page),
+					PAGE_SIZE << order, dir,
+					DMA_ATTR_SKIP_CPU_SYNC);
+		prot = XE_INTERCONNECT_P2P;
 	}
 
 	return drm_pagemap_addr_encode(addr, prot, order, dir);
 }
 
+static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap,
+					struct device *dev,
+					struct drm_pagemap_addr addr)
+{
+	if (addr.proto != XE_INTERCONNECT_P2P)
+		return;
+
+	dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order,
+			   addr.dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
 static void xe_pagemap_destroy_work(struct work_struct *work)
 {
 	struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
@@ -1620,6 +1647,7 @@ static void xe_pagemap_destroy(struct drm_pagemap *dpagemap, bool from_atomic_or
 
 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
 	.device_map = xe_drm_pagemap_device_map,
+	.device_unmap = xe_drm_pagemap_device_unmap,
 	.populate_mm = xe_drm_pagemap_populate_mm,
 	.destroy = xe_pagemap_destroy,
 };

--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h

@@ -13,6 +13,7 @@
 #include <drm/drm_pagemap_util.h>
 
 #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
+#define XE_INTERCONNECT_P2P (XE_INTERCONNECT_VRAM + 1)
 
 struct drm_device;
 struct drm_file;
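
Note on the whitelist check in xe_has_interconnect() above:
pci_p2pdma_distance() returns a non-negative cost when the PCI
topology is expected to support P2P DMA between the provider and the
client device, and a negative value otherwise, so a caller (sketch
with hypothetical names) looks like:

	/* The P2P path is only taken when the kernel deems it viable. */
	if (pci_p2pdma_distance(provider_pdev, client_dev, true) >= 0)
		/* ... map via dma_map_resource() ... */;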