Skip to content
Snippets Groups Projects
Commit 7774e392 authored by Johannes Berg
Browse files

wifi: iwlwifi: fw: allocate chained SG tables for dump


The firmware dumps can be pretty big, and since we use single
pages for each SG table entry, even the table itself may end
up being an order-5 allocation. Build chained tables so that
we need not allocate a higher-order table here.

This could be improved and cleaned up, e.g. by using the SG
pool code or simply kvmalloc(), but all of that would require
also updating the devcoredump first since that frees it all,
so we need to be more careful. SG pool might also run against
the CONFIG_ARCH_NO_SG_CHAIN limitation, which is irrelevant
here.

Also use _devcd_free_sgtable() for the error paths now, much
simpler especially since it's in two places now.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250209143303.697c7a465ac9.Iea982df46b5c075bfb77ade36f187d99a70c63db@changeid


Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 646262c7
No related merge requests found
......@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
}
/*
* alloc_sgtable - allocates scallerlist table in the given size,
* fills it with pages and returns it
* alloc_sgtable - allocates (chained) scatterlist in the given size,
* fills it with pages and returns it
* @size: the size (in bytes) of the table
*/
static struct scatterlist *alloc_sgtable(int size)
*/
static struct scatterlist *alloc_sgtable(ssize_t size)
{
int alloc_size, nents, i;
struct page *new_page;
struct scatterlist *iter;
struct scatterlist *table;
struct scatterlist *result = NULL, *prev;
int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE);
table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
sg_init_table(table, nents);
iter = table;
for_each_sg(table, iter, sg_nents(table), i) {
new_page = alloc_page(GFP_KERNEL);
if (!new_page) {
/* release all previous allocated pages in the table */
iter = table;
for_each_sg(table, iter, sg_nents(table), i) {
new_page = sg_page(iter);
if (new_page)
__free_page(new_page);
}
kfree(table);
#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
/*
* We need an additional entry for table chaining,
* this ensures the loop can finish i.e. we can
* fit at least two entries per page (obviously,
* many more really fit.)
*/
BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
while (nents > 0) {
struct scatterlist *new, *iter;
int n_fill, n_alloc;
if (nents <= N_ENTRIES_PER_PAGE) {
/* last needed table */
n_fill = nents;
n_alloc = nents;
nents = 0;
} else {
/* fill a page with entries */
n_alloc = N_ENTRIES_PER_PAGE;
/* reserve one for chaining */
n_fill = n_alloc - 1;
nents -= n_fill;
}
new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
if (!new) {
if (result)
_devcd_free_sgtable(result);
return NULL;
}
alloc_size = min_t(int, size, PAGE_SIZE);
size -= PAGE_SIZE;
sg_set_page(iter, new_page, alloc_size, 0);
sg_init_table(new, n_alloc);
if (!result)
result = new;
else
sg_chain(prev, n_prev, new);
prev = new;
n_prev = n_alloc;
for_each_sg(new, iter, n_fill, i) {
struct page *new_page = alloc_page(GFP_KERNEL);
if (!new_page) {
_devcd_free_sgtable(result);
return NULL;
}
sg_set_page(iter, new_page, PAGE_SIZE, 0);
}
}
return table;
return result;
}
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment