drm/gpusvm: export drm_gpusvm_pages API

Export the get/unmap/free pages API. We also need to tweak the SVM init to
allow skipping the parts of the setup that are not needed in this mode.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250828142430.615826-15-matthew.auld@intel.com
This commit is contained in:
Matthew Auld
2025-08-28 15:24:36 +01:00
parent 6364afd532
commit 83f706ecbd
2 changed files with 67 additions and 15 deletions

View File

@@ -373,6 +373,12 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*
* This function initializes the GPU SVM.
*
* Note: If only using the simple drm_gpusvm_pages API (get/unmap/free),
* then only @gpusvm, @name, and @drm are expected. However, the same base
* @gpusvm can also be used with both modes together in which case the full
* setup is needed, where the core drm_gpusvm_pages API will simply never use
* the other fields.
*
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
@@ -383,8 +389,16 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const struct drm_gpusvm_ops *ops,
const unsigned long *chunk_sizes, int num_chunks)
{
if (!ops->invalidate || !num_chunks)
return -EINVAL;
if (mm) {
if (!ops->invalidate || !num_chunks)
return -EINVAL;
mmgrab(mm);
} else {
/* No full SVM mode, only core drm_gpusvm_pages API. */
if (ops || num_chunks || mm_range || notifier_size ||
device_private_page_owner)
return -EINVAL;
}
gpusvm->name = name;
gpusvm->drm = drm;
@@ -397,7 +411,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
@@ -489,7 +502,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
drm_gpusvm_range_remove(gpusvm, range);
}
mmdrop(gpusvm->mm);
if (gpusvm->mm)
mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
@@ -1044,6 +1058,27 @@ static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
}
}
/**
 * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
 * struct
 * @gpusvm: Pointer to the GPU SVM structure
 * @svm_pages: Pointer to the GPU SVM pages structure
 * @npages: Number of mapped pages
 *
 * This function unmaps and frees the dma address array associated with a GPU
 * SVM pages struct.
 *
 * The whole unmap + free sequence runs under the GPU SVM notifier lock so it
 * cannot race with a concurrent invalidation walking the same @svm_pages.
 * NOTE(review): presumably must not be called from within an invalidate()
 * callback itself, since the notifier lock is taken unconditionally here —
 * confirm against the notifier locking rules.
 */
void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages)
{
drm_gpusvm_notifier_lock(gpusvm);
/* Tear down the DMA mappings first, then release the dma-address array. */
__drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
__drm_gpusvm_free_pages(gpusvm, svm_pages);
drm_gpusvm_notifier_unlock(gpusvm);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
@@ -1220,13 +1255,12 @@ static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
*
* Return: 0 on success, negative error code on failure.
*/
static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
struct mm_struct *mm,
struct mmu_interval_notifier *notifier,
unsigned long pages_start,
unsigned long pages_end,
const struct drm_gpusvm_ctx *ctx)
int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
struct mm_struct *mm,
struct mmu_interval_notifier *notifier,
unsigned long pages_start, unsigned long pages_end,
const struct drm_gpusvm_ctx *ctx)
{
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
@@ -1415,6 +1449,7 @@ err_free:
goto retry;
return err;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
/**
* drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
@@ -1451,10 +1486,10 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
* Must be called in the invalidate() callback of the corresponding notifier for
* IOMMU security model.
*/
static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages,
const struct drm_gpusvm_ctx *ctx)
void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages,
const struct drm_gpusvm_ctx *ctx)
{
if (ctx->in_notifier)
lockdep_assert_held_write(&gpusvm->notifier_lock);
@@ -1466,6 +1501,7 @@ static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
if (!ctx->in_notifier)
drm_gpusvm_notifier_unlock(gpusvm);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range

View File

@@ -307,6 +307,22 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
const struct mmu_notifier_range *mmu_range);
int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
struct mm_struct *mm,
struct mmu_interval_notifier *notifier,
unsigned long pages_start, unsigned long pages_end,
const struct drm_gpusvm_ctx *ctx);
void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages,
const struct drm_gpusvm_ctx *ctx);
void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages);
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM