accel/amdxdna: Add BO import and export
Add amdxdna_gem_prime_export() and amdxdna_gem_prime_import() for BO export and import. Register an mmu notifier for imported BOs as well. When an MMU_NOTIFY_UNMAP event is received, queue work to remove the notifier. The same BO can be mapped multiple times if an application exports and then imports it, so use a linked list to track the VMAs the BO has been mapped into.

v2: Rebased and call get_dma_buf() before dma_buf_attach()
v3: Removed import_attach usage

Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
Link: https://lore.kernel.org/r/20250325200105.2744079-1-lizhi.hou@amd.com
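As context for the change (not part of the patch): the round trip below sketches how userspace would exercise both new paths through libdrm's PRIME helpers. mmap'ing both handles is exactly what gives one BO several VMAs, which is why the patch tracks mappings in a list. The function name and error handling are illustrative only.

#include <stdint.h>
#include <xf86drm.h>

int export_then_import(int fd, uint32_t handle)
{
	int dmabuf_fd;
	uint32_t imported;

	/* GEM handle -> dma-buf fd; ends up in amdxdna_gem_prime_export() */
	if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC | DRM_RDWR, &dmabuf_fd))
		return -1;

	/* dma-buf fd -> GEM handle; ends up in amdxdna_gem_prime_import() */
	if (drmPrimeFDToHandle(fd, dmabuf_fd, &imported))
		return -1;

	return 0;
}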
@@ -1,3 +1,2 @@
-- Add import and export BO support
 - Add debugfs support
 - Add debug BO support

@@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
 static int aie2_populate_range(struct amdxdna_gem_obj *abo)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
-	struct mm_struct *mm = abo->mem.notifier.mm;
-	struct hmm_range range = { 0 };
+	struct amdxdna_umap *mapp;
 	unsigned long timeout;
+	struct mm_struct *mm;
+	bool found;
 	int ret;
 
-	XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
-		       abo->mem.userptr, abo->mem.size);
-	range.notifier = &abo->mem.notifier;
-	range.start = abo->mem.userptr;
-	range.end = abo->mem.userptr + abo->mem.size;
-	range.hmm_pfns = abo->mem.pfns;
-	range.default_flags = HMM_PFN_REQ_FAULT;
-
-	if (!mmget_not_zero(mm))
-		return -EFAULT;
-
 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 again:
-	range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+	found = false;
+	down_write(&xdna->notifier_lock);
+	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+		if (mapp->invalid) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		abo->mem.map_invalid = false;
+		up_write(&xdna->notifier_lock);
+		return 0;
+	}
+	kref_get(&mapp->refcnt);
+	up_write(&xdna->notifier_lock);
+
+	XDNA_DBG(xdna, "populate memory range %lx %lx",
+		 mapp->vma->vm_start, mapp->vma->vm_end);
+	mm = mapp->notifier.mm;
+	if (!mmget_not_zero(mm)) {
+		amdxdna_umap_put(mapp);
+		return -EFAULT;
+	}
+
+	mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
 	mmap_read_lock(mm);
-	ret = hmm_range_fault(&range);
+	ret = hmm_range_fault(&mapp->range);
 	mmap_read_unlock(mm);
 	if (ret) {
 		if (time_after(jiffies, timeout)) {
@@ -786,21 +801,27 @@ again:
 			goto put_mm;
 		}
 
-		if (ret == -EBUSY)
+		if (ret == -EBUSY) {
+			amdxdna_umap_put(mapp);
 			goto again;
+		}
 
 		goto put_mm;
 	}
 
-	down_read(&xdna->notifier_lock);
-	if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
-		up_read(&xdna->notifier_lock);
+	down_write(&xdna->notifier_lock);
+	if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+		up_write(&xdna->notifier_lock);
+		amdxdna_umap_put(mapp);
 		goto again;
 	}
-	abo->mem.map_invalid = false;
-	up_read(&xdna->notifier_lock);
+	mapp->invalid = false;
+	up_write(&xdna->notifier_lock);
+	amdxdna_umap_put(mapp);
+	goto again;
 
 put_mm:
+	amdxdna_umap_put(mapp);
 	mmput(mm);
 	return ret;
 }
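The rework above keeps the canonical mmu_interval_notifier retry protocol, now applied per struct amdxdna_umap instead of per BO. A minimal, driver-agnostic sketch of that protocol (the function and lock names are placeholders; the kernel APIs are real, and the caller is assumed to have set range->notifier = ni and allocated range->hmm_pfns):

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int fault_in_range(struct mmu_interval_notifier *ni,
			  struct hmm_range *range, struct mm_struct *mm,
			  struct rw_semaphore *lock)
{
	int ret;

again:
	range->notifier_seq = mmu_interval_read_begin(ni);
	mmap_read_lock(mm);
	ret = hmm_range_fault(range);	/* -EBUSY means collided, retry */
	mmap_read_unlock(mm);
	if (ret == -EBUSY)
		goto again;
	if (ret)
		return ret;

	down_write(lock);
	/* An invalidation may have raced between read_begin() and here,
	 * making range->hmm_pfns stale; detect it and start over. */
	if (mmu_interval_read_retry(ni, range->notifier_seq)) {
		up_write(lock);
		goto again;
	}
	/* pfns are stable for as long as the lock is held. */
	up_write(lock);
	return 0;
}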
@@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
 	struct drm_gem_object *gobj = to_gobj(abo);
 	long ret;
 
-	down_write(&xdna->notifier_lock);
-	abo->mem.map_invalid = true;
-	mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
-	up_write(&xdna->notifier_lock);
 	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
 				    true, MAX_SCHEDULE_TIMEOUT);
 	if (!ret || ret == -ERESTARTSYS)

@@ -9,7 +9,10 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/iosys-map.h>
+#include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 
 #include "amdxdna_ctx.h"
@@ -18,6 +21,8 @@
 
 #define XDNA_MAX_CMD_BO_SIZE SZ_32K
 
+MODULE_IMPORT_NS("DMA_BUF");
+
 static int
 amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
 {
@@ -55,6 +60,306 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
 	return 0;
 }
 
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+				   const struct mmu_notifier_range *range,
+				   unsigned long cur_seq)
+{
+	struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+	struct amdxdna_gem_obj *abo = mapp->abo;
+	struct amdxdna_dev *xdna;
+
+	xdna = to_xdna_dev(to_gobj(abo)->dev);
+	XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+		 mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
+
+	if (!mmu_notifier_range_blockable(range))
+		return false;
+
+	down_write(&xdna->notifier_lock);
+	abo->mem.map_invalid = true;
+	mapp->invalid = true;
+	mmu_interval_set_seq(&mapp->notifier, cur_seq);
+	up_write(&xdna->notifier_lock);
+
+	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+	if (range->event == MMU_NOTIFY_UNMAP) {
+		down_write(&xdna->notifier_lock);
+		if (!mapp->unmapped) {
+			queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+			mapp->unmapped = true;
+		}
+		up_write(&xdna->notifier_lock);
+	}
+
+	return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+	.invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+				   struct vm_area_struct *vma)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+	struct amdxdna_umap *mapp;
+
+	down_read(&xdna->notifier_lock);
+	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+		if (!vma || mapp->vma == vma) {
+			if (!mapp->unmapped) {
+				queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+				mapp->unmapped = true;
+			}
+			if (vma)
+				break;
+		}
+	}
+	up_read(&xdna->notifier_lock);
+}
+
+static void amdxdna_umap_release(struct kref *ref)
+{
+	struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+	struct vm_area_struct *vma = mapp->vma;
+	struct amdxdna_dev *xdna;
+
+	mmu_interval_notifier_remove(&mapp->notifier);
+	if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+		mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+	xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+	down_write(&xdna->notifier_lock);
+	list_del(&mapp->node);
+	up_write(&xdna->notifier_lock);
+
+	kvfree(mapp->range.hmm_pfns);
+	kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+	kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+	struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+						 hmm_unreg_work);
+
+	amdxdna_umap_put(mapp);
+}
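Note the shape of the teardown: mmu_interval_notifier_remove() blocks until running invalidate callbacks have finished, so calling it from amdxdna_hmm_invalidate() itself would deadlock. The patch therefore only queues hmm_unreg_work from the callback and lets the ordered notifier_wq drop the final reference. A reduced, hypothetical sketch of the same pattern (names other than the kernel APIs are illustrative):

#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical reduction of struct amdxdna_umap's deferred teardown. */
struct tracked_map {
	struct mmu_interval_notifier notifier;
	struct work_struct teardown;
	struct kref refcnt;
};

static void tracked_map_release(struct kref *ref)
{
	struct tracked_map *map = container_of(ref, struct tracked_map, refcnt);

	/* Safe here: worker context, never the invalidate callback. */
	mmu_interval_notifier_remove(&map->notifier);
	kfree(map);
}

static void tracked_map_teardown(struct work_struct *work)
{
	struct tracked_map *map = container_of(work, struct tracked_map,
					       teardown);

	kref_put(&map->refcnt, tracked_map_release);
}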
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+				struct vm_area_struct *vma)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+	unsigned long len = vma->vm_end - vma->vm_start;
+	unsigned long addr = vma->vm_start;
+	struct amdxdna_umap *mapp;
+	u32 nr_pages;
+	int ret;
+
+	if (!xdna->dev_info->ops->hmm_invalidate)
+		return 0;
+
+	mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+	if (!mapp)
+		return -ENOMEM;
+
+	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+	mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+					GFP_KERNEL);
+	if (!mapp->range.hmm_pfns) {
+		ret = -ENOMEM;
+		goto free_map;
+	}
+
+	ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
+						  current->mm,
+						  addr,
+						  len,
+						  &amdxdna_hmm_ops);
+	if (ret) {
+		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+		goto free_pfns;
+	}
+
+	mapp->range.notifier = &mapp->notifier;
+	mapp->range.start = vma->vm_start;
+	mapp->range.end = vma->vm_end;
+	mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+	mapp->vma = vma;
+	mapp->abo = abo;
+	kref_init(&mapp->refcnt);
+
+	if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+		abo->mem.userptr = addr;
+	INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+	if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+		mapping_set_unevictable(vma->vm_file->f_mapping);
+
+	down_write(&xdna->notifier_lock);
+	list_add_tail(&mapp->node, &abo->mem.umap_list);
+	up_write(&xdna->notifier_lock);
+
+	return 0;
+
+free_pfns:
+	kvfree(mapp->range.hmm_pfns);
+free_map:
+	kfree(mapp);
+	return ret;
+}
+
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+				struct vm_area_struct *vma)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+	unsigned long num_pages = vma_pages(vma);
+	unsigned long offset = 0;
+	int ret;
+
+	if (!is_import_bo(abo)) {
+		ret = drm_gem_shmem_mmap(&abo->base, vma);
+		if (ret) {
+			XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+			return ret;
+		}
+
+		/* The buffer is based on memory pages. Fix the flag. */
+		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+		ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+				      &num_pages);
+		if (ret) {
+			XDNA_ERR(xdna, "Failed insert pages %d", ret);
+			vma->vm_ops->close(vma);
+			return ret;
+		}
+
+		return 0;
+	}
+
+	vma->vm_private_data = NULL;
+	vma->vm_ops = NULL;
+	ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+	if (ret) {
+		XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+		return ret;
+	}
+
+	do {
+		vm_fault_t fault_ret;
+
+		fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+					    FAULT_FLAG_WRITE, NULL);
+		if (fault_ret & VM_FAULT_ERROR) {
+			vma->vm_ops->close(vma);
+			XDNA_ERR(xdna, "Fault in page failed");
+			return -EFAULT;
+		}
+
+		offset += PAGE_SIZE;
+	} while (--num_pages);
+
+	/* Drop the reference drm_gem_mmap_obj() acquired. */
+	drm_gem_object_put(to_gobj(abo));
+
+	return 0;
+}
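For imported BOs the pages belong to the exporter, so the driver cannot vm_insert_pages() them itself; it hands the VMA to dma_buf_mmap() and then drives handle_mm_fault() across the range so every page is resident before hmm_range_fault() runs. A hedged userspace-side sketch of the equivalent eager prefault (the map_offset is assumed to come from the driver's BO-info query; exact uapi names are omitted):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Sketch: map a BO through the render node and touch one byte per page,
 * mirroring the prefault loop the kernel runs in amdxdna_insert_pages().
 */
static void *map_and_prefault(int drm_fd, uint64_t map_offset, size_t size)
{
	uint8_t *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			  drm_fd, (off_t)map_offset);

	if (p == MAP_FAILED)
		return NULL;

	for (size_t off = 0; off < size; off += 4096)
		p[off] = 0;	/* fault the page in */

	return p;
}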
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+				struct vm_area_struct *vma)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+	int ret;
+
+	ret = amdxdna_hmm_register(abo, vma);
+	if (ret)
+		return ret;
+
+	ret = amdxdna_insert_pages(abo, vma);
+	if (ret) {
+		XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+		goto hmm_unreg;
+	}
+
+	XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+		 drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+		 vma->vm_start, gobj->size);
+	return 0;
+
+hmm_unreg:
+	amdxdna_hmm_unregister(abo, vma);
+	return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gobj = dma_buf->priv;
+	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+	unsigned long num_pages = vma_pages(vma);
+	int ret;
+
+	vma->vm_ops = &drm_gem_shmem_vm_ops;
+	vma->vm_private_data = gobj;
+
+	drm_gem_object_get(gobj);
+	ret = drm_gem_shmem_mmap(&abo->base, vma);
+	if (ret)
+		goto put_obj;
+
+	/* The buffer is based on memory pages. Fix the flag. */
+	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+			      &num_pages);
+	if (ret)
+		goto close_vma;
+
+	return 0;
+
+close_vma:
+	vma->vm_ops->close(vma);
+put_obj:
+	drm_gem_object_put(gobj);
+	return ret;
+}
+
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.mmap = amdxdna_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &amdxdna_dmabuf_ops;
+	exp_info.size = gobj->size;
+	exp_info.flags = flags;
+	exp_info.priv = gobj;
+	exp_info.resv = gobj->resv;
+
+	return drm_gem_dmabuf_export(gobj->dev, &exp_info);
+}
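One detail worth noting in the export path: exp_info.resv = gobj->resv shares a single reservation object between exporter and importer, so fences attached by either side are visible to both. That is what lets aie2_hmm_invalidate() quiesce the BO with dma_resv_wait_timeout() no matter which side queued the work. A minimal kernel-side sketch of such a wait (the helper name and finite timeout are illustrative):

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>
#include <linux/jiffies.h>

/* Sketch: wait out every fence on the BO's (possibly shared) resv. */
static int wait_for_bo_idle(struct drm_gem_object *gobj)
{
	long ret;

	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
				    true, msecs_to_jiffies(5000));
	if (!ret)
		return -ETIME;	/* timed out with fences still pending */

	return ret < 0 ? ret : 0;
}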
+
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
+{
+	dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+	dma_buf_detach(abo->dma_buf, abo->attach);
+	dma_buf_put(abo->dma_buf);
+	drm_gem_object_release(to_gobj(abo));
+	kfree(abo);
+}
+
 static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
@@ -62,6 +367,10 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
 
 	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+	amdxdna_hmm_unregister(abo, NULL);
+	flush_workqueue(xdna->notifier_wq);
+
 	if (abo->pinned)
 		amdxdna_gem_unpin(abo);
@@ -83,6 +392,12 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 
 	drm_gem_vunmap(gobj, &map);
 	mutex_destroy(&abo->lock);
+
+	if (is_import_bo(abo)) {
+		amdxdna_imported_obj_free(abo);
+		return;
+	}
+
 	drm_gem_shmem_free(&abo->base);
 }
@@ -90,127 +405,6 @@ static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
 	.free = amdxdna_gem_obj_free,
 };
 
-static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
-				   const struct mmu_notifier_range *range,
-				   unsigned long cur_seq)
-{
-	struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
-						   mem.notifier);
-	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
-
-	XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
-		 abo->mem.userptr, abo->mem.size, abo->type);
-
-	if (!mmu_notifier_range_blockable(range))
-		return false;
-
-	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
-
-	return true;
-}
-
-static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
-	.invalidate = amdxdna_hmm_invalidate,
-};
-
-static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
-{
-	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
-
-	if (!xdna->dev_info->ops->hmm_invalidate)
-		return;
-
-	mmu_interval_notifier_remove(&abo->mem.notifier);
-	kvfree(abo->mem.pfns);
-	abo->mem.pfns = NULL;
-}
-
-static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
-				size_t len)
-{
-	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
-	u32 nr_pages;
-	int ret;
-
-	if (!xdna->dev_info->ops->hmm_invalidate)
-		return 0;
-
-	if (abo->mem.pfns)
-		return -EEXIST;
-
-	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
-	abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
-				 GFP_KERNEL);
-	if (!abo->mem.pfns)
-		return -ENOMEM;
-
-	ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
-						  current->mm,
-						  addr,
-						  len,
-						  &amdxdna_hmm_ops);
-	if (ret) {
-		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
-		kvfree(abo->mem.pfns);
-	}
-	abo->mem.userptr = addr;
-
-	return ret;
-}
-
-static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
-				struct vm_area_struct *vma)
-{
-	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
-	unsigned long num_pages;
-	int ret;
-
-	ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
-	if (ret)
-		return ret;
-
-	ret = drm_gem_shmem_mmap(&abo->base, vma);
-	if (ret)
-		goto hmm_unreg;
-
-	num_pages = gobj->size >> PAGE_SHIFT;
-	/* Try to insert the pages */
-	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
-	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
-	if (ret)
-		XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
-
-	return 0;
-
-hmm_unreg:
-	amdxdna_hmm_unregister(abo);
-	return ret;
-}
-
-static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
-{
-	return drm_gem_shmem_vm_ops.fault(vmf);
-}
-
-static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
-{
-	drm_gem_shmem_vm_ops.open(vma);
-}
-
-static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
-{
-	struct drm_gem_object *gobj = vma->vm_private_data;
-
-	amdxdna_hmm_unregister(to_xdna_obj(gobj));
-	drm_gem_shmem_vm_ops.close(vma);
-}
-
-static const struct vm_operations_struct amdxdna_gem_vm_ops = {
-	.fault = amdxdna_gem_vm_fault,
-	.open = amdxdna_gem_vm_open,
-	.close = amdxdna_gem_vm_close,
-};
-
 static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
 	.free = amdxdna_gem_obj_free,
 	.print_info = drm_gem_shmem_object_print_info,
@@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
 	.vmap = drm_gem_shmem_object_vmap,
 	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = amdxdna_gem_obj_mmap,
-	.vm_ops = &amdxdna_gem_vm_ops,
+	.vm_ops = &drm_gem_shmem_vm_ops,
+	.export = amdxdna_gem_prime_export,
 };
 
 static struct amdxdna_gem_obj *
@@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
 	abo->mem.userptr = AMDXDNA_INVALID_ADDR;
 	abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
 	abo->mem.size = size;
+	INIT_LIST_HEAD(&abo->mem.umap_list);
 
 	return abo;
 }
@@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
 	return to_gobj(abo);
 }
 
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct amdxdna_gem_obj *abo;
+	struct drm_gem_object *gobj;
+	struct sg_table *sgt;
+	int ret;
+
+	get_dma_buf(dma_buf);
+
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach)) {
+		ret = PTR_ERR(attach);
+		goto put_buf;
+	}
+
+	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		goto fail_detach;
+	}
+
+	gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR(gobj)) {
+		ret = PTR_ERR(gobj);
+		goto fail_unmap;
+	}
+
+	abo = to_xdna_obj(gobj);
+	abo->attach = attach;
+	abo->dma_buf = dma_buf;
+
+	return gobj;
+
+fail_unmap:
+	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+put_buf:
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
+
 static struct amdxdna_gem_obj *
 amdxdna_drm_alloc_shmem(struct drm_device *dev,
 			struct amdxdna_drm_create_bo *args,
@@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
 	int ret;
 
+	if (is_import_bo(abo))
+		return 0;
+
 	switch (abo->type) {
 	case AMDXDNA_BO_SHMEM:
 	case AMDXDNA_BO_DEV_HEAP:
@@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 
 void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
 {
+	if (is_import_bo(abo))
+		return;
+
 	if (abo->type == AMDXDNA_BO_DEV)
 		abo = abo->dev_heap;
 
@@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
 		goto put_obj;
 	}
 
-	if (abo->type == AMDXDNA_BO_DEV)
+	if (is_import_bo(abo))
+		drm_clflush_sg(abo->base.sgt);
+	else if (abo->type == AMDXDNA_BO_DEV)
 		drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
 	else
 		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);

@@ -6,6 +6,20 @@
 #ifndef _AMDXDNA_GEM_H_
 #define _AMDXDNA_GEM_H_
 
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+	struct vm_area_struct *vma;
+	struct mmu_interval_notifier notifier;
+	struct hmm_range range;
+	struct work_struct hmm_unreg_work;
+	struct amdxdna_gem_obj *abo;
+	struct list_head node;
+	struct kref refcnt;
+	bool invalid;
+	bool unmapped;
+};
+
 struct amdxdna_mem {
 	u64 userptr;
 	void *kva;
@@ -13,8 +27,7 @@ struct amdxdna_mem {
 	size_t size;
 	struct page **pages;
 	u32 nr_pages;
-	struct mmu_interval_notifier notifier;
-	unsigned long *pfns;
+	struct list_head umap_list;
 	bool map_invalid;
 };
@@ -31,9 +44,12 @@ struct amdxdna_gem_obj {
 	struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
 	struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
 	u32 assigned_hwctx;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attach;
 };
 
 #define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
 
 static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
 {
@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
 	drm_gem_object_put(to_gobj(abo));
 }
 
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
 struct drm_gem_object *
 amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
 struct amdxdna_gem_obj *
 amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
 			 struct amdxdna_drm_create_bo *args,

@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
 	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
 
 	.gem_create_object = amdxdna_gem_create_object_cb,
+	.gem_prime_import = amdxdna_gem_prime_import,
 };
 
 static const struct amdxdna_dev_info *
@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		fs_reclaim_release(GFP_KERNEL);
 	}
 
+	xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+	if (!xdna->notifier_wq)
+		return -ENOMEM;
+
 	mutex_lock(&xdna->dev_lock);
 	ret = xdna->dev_info->ops->init(xdna);
 	mutex_unlock(&xdna->dev_lock);
 	if (ret) {
 		XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
-		return ret;
+		goto destroy_notifier_wq;
 	}
 
 	ret = amdxdna_sysfs_init(xdna);
@@ -301,6 +306,8 @@ failed_dev_fini:
 	mutex_lock(&xdna->dev_lock);
 	xdna->dev_info->ops->fini(xdna);
 	mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+	destroy_workqueue(xdna->notifier_wq);
 	return ret;
 }
@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
 	struct device *dev = &pdev->dev;
 	struct amdxdna_client *client;
 
+	destroy_workqueue(xdna->notifier_wq);
+
 	pm_runtime_get_noresume(dev);
 	pm_runtime_forbid(dev);
 

@@ -6,6 +6,7 @@
 #ifndef _AMDXDNA_PCI_DRV_H_
 #define _AMDXDNA_PCI_DRV_H_
 
+#include <linux/workqueue.h>
 #include <linux/xarray.h>
 
 #define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
@@ -98,6 +99,7 @@ struct amdxdna_dev {
 	struct list_head client_list;
 	struct amdxdna_fw_ver fw_ver;
 	struct rw_semaphore notifier_lock; /* for mmu notifier*/
+	struct workqueue_struct *notifier_wq;
 };
 
 /*