firewire: core: stop using page private to store DMA mapping address

There has been a long discussion among Linux kernel developers about the
use of the private field in the page structure.

This commit stops using page private to store the DMA mapping address for
the isochronous context, to prepare for future mm changes.

Link: https://lore.kernel.org/r/20260110013911.19160-6-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
This commit is contained in:
Takashi Sakamoto
2026-01-10 10:39:08 +09:00
parent c2f60aa160
commit ef6bdffbb8
4 changed files with 39 additions and 40 deletions

View File

@@ -67,7 +67,6 @@ struct client {
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
@@ -1098,7 +1097,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
// The DMA mapping operation is available if the buffer is already allocated by
// mmap(2) system call. If not, it is delegated to the system call.
if (!client->buffer_is_mapped) {
if (client->buffer.pages && !client->buffer.dma_addrs) {
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
iso_dma_direction(context));
if (ret < 0) {
@@ -1106,7 +1105,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
return ret;
}
client->buffer_is_mapped = true;
}
client->iso_closure = a->closure;
client->iso_context = context;
@@ -1837,7 +1835,6 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
iso_dma_direction(client->iso_context));
if (ret < 0)
goto fail;
client->buffer_is_mapped = true;
}
}

View File

@@ -55,25 +55,32 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction)
{
dma_addr_t address;
dma_addr_t *dma_addrs __free(kfree) = kcalloc(buffer->page_count, sizeof(dma_addrs[0]),
GFP_KERNEL);
int i;
buffer->direction = direction;
if (!dma_addrs)
return -ENOMEM;
// Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the cache
// coherency for the pages by hand.
for (i = 0; i < buffer->page_count; i++) {
// The dma_map_phys() with a physical address per page is available here, instead.
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
if (dma_mapping_error(card->device, address))
dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
direction);
if (dma_mapping_error(card->device, dma_addr))
break;
set_page_private(buffer->pages[i], address);
dma_addrs[i] = dma_addr;
}
buffer->page_count_mapped = i;
if (i < buffer->page_count)
if (i < buffer->page_count) {
while (i-- > 0)
dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, buffer->direction);
return -ENOMEM;
}
buffer->direction = direction;
buffer->dma_addrs = no_free_ptr(dma_addrs);
return 0;
}
@@ -98,13 +105,13 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
int i;
dma_addr_t address;
for (i = 0; i < buffer->page_count_mapped; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
PAGE_SIZE, buffer->direction);
if (buffer->dma_addrs) {
for (int i = 0; i < buffer->page_count; ++i) {
dma_addr_t dma_addr = buffer->dma_addrs[i];
dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
}
kfree(buffer->dma_addrs);
buffer->dma_addrs = NULL;
}
if (buffer->pages) {
@@ -114,20 +121,15 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
}
buffer->page_count = 0;
buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
size_t i;
dma_addr_t address;
ssize_t offset;
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
offset = (ssize_t)completed - (ssize_t)address;
for (int i = 0; i < buffer->page_count; i++) {
dma_addr_t dma_addr = buffer->dma_addrs[i];
ssize_t offset = (ssize_t)completed - (ssize_t)dma_addr;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
}

View File

@@ -3184,7 +3184,7 @@ static int queue_iso_transmit(struct iso_context *ctx,
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
dma_addr_t d_bus, page_bus;
dma_addr_t d_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
@@ -3254,11 +3254,11 @@ static int queue_iso_transmit(struct iso_context *ctx,
min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
dma_addr_t dma_addr = buffer->dma_addrs[i];
pd[i].data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
page_bus, offset, length,
dma_addr, offset, length,
DMA_TO_DEVICE);
payload_index += length;
@@ -3287,7 +3287,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
{
struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
dma_addr_t d_bus, page_bus;
dma_addr_t d_bus;
u32 z, header_z, rest;
int i, j, length;
int page, offset, packet_count, header_size, payload_per_buffer;
@@ -3337,10 +3337,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
pd->res_count = pd->req_count;
pd->transfer_status = 0;
page_bus = page_private(buffer->pages[page]);
pd->data_address = cpu_to_le32(page_bus + offset);
dma_addr_t dma_addr = buffer->dma_addrs[page];
pd->data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(device, page_bus,
dma_sync_single_range_for_device(device, dma_addr,
offset, length,
DMA_FROM_DEVICE);
@@ -3367,7 +3367,7 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
unsigned long payload)
{
struct descriptor *d;
dma_addr_t d_bus, page_bus;
dma_addr_t d_bus;
int page, offset, rest, z, i, length;
page = payload >> PAGE_SHIFT;
@@ -3400,11 +3400,11 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
d->res_count = d->req_count;
d->transfer_status = 0;
page_bus = page_private(buffer->pages[page]);
d->data_address = cpu_to_le32(page_bus + offset);
dma_addr_t dma_addr = buffer->dma_addrs[page];
d->data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
page_bus, offset, length,
dma_addr, offset, length,
DMA_FROM_DEVICE);
rest -= length;

View File

@@ -526,8 +526,8 @@ struct fw_iso_packet {
struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
dma_addr_t *dma_addrs;
int page_count;
int page_count_mapped;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,