firewire: core: reallocate buffer for FCP address handlers when more than 4 are registered

The previous commit introduced a limitation in which only up to 4 FCP
address handlers could be processed per request. Although this suffices
for most use cases, it is technically a regression.

This commit lifts the restriction by reallocating the buffer from the
kernel heap when more than 4 handlers are registered. The allocation is
performed within an RCU read-side critical section, thus it uses the
GFP_ATOMIC flag. The buffer size is rounded up to the next power of two
to align with kmalloc allocation units.

Link: https://lore.kernel.org/r/20250803122015.236493-5-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
pull/1082/merge
Takashi Sakamoto 2025-08-03 21:20:15 +09:00
parent e884a8a0c5
commit 0342273e14
1 changed file with 32 additions and 4 deletions

View File

@ -960,7 +960,7 @@ static void handle_fcp_region_request(struct fw_card *card,
{
struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
struct fw_address_handler *handler, **handlers;
int tcode, destination, source, i, count;
int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@ -983,13 +983,38 @@ static void handle_fcp_region_request(struct fw_card *card,
count = 0;
handlers = buffer_on_kernel_stack;
buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length)) {
if (count >= buffer_size) {
int next_size = buffer_size * 2;
struct fw_address_handler **buffer_on_kernel_heap;
if (handlers == buffer_on_kernel_stack)
buffer_on_kernel_heap = NULL;
else
buffer_on_kernel_heap = handlers;
buffer_on_kernel_heap =
krealloc_array(buffer_on_kernel_heap, next_size,
sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
// FCP is used for purposes unrelated to significant system
// resources (e.g. storage or networking), so allocation
// failures are not considered so critical.
if (!buffer_on_kernel_heap)
break;
if (handlers == buffer_on_kernel_stack) {
memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
sizeof(buffer_on_kernel_stack));
}
handlers = buffer_on_kernel_heap;
buffer_size = next_size;
}
get_address_handler(handler);
handlers[count] = handler;
if (++count >= ARRAY_SIZE(buffer_on_kernel_stack))
break;
handlers[count++] = handler;
}
}
}
@ -1002,6 +1027,9 @@ static void handle_fcp_region_request(struct fw_card *card,
put_address_handler(handler);
}
if (handlers != buffer_on_kernel_stack)
kfree(handlers);
fw_send_response(card, request, RCODE_COMPLETE);
}