Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d840a60 authored by David S. Miller
Browse files
parents d67b66b4 885892fb
Loading
Loading
Loading
Loading
+12 −6
Original line number Diff line number Diff line
@@ -43,12 +43,13 @@
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk. Note that the chunks are not necessarily in contiguous
 * physical memory.
 */
enum {
	/* Size of each ICM allocation request: 256 KB (1 << 18). */
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	/* Granularity at which ICM tables are carved into chunks: 256 KB. */
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
};

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	gfp_t mask;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
@@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
		while (1 << cur_order > npages)
			--cur_order;

		mask = gfp_mask;
		if (cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
						      cur_order, mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask,
						   cur_order, mask,
						   dev->numa_node);

		if (ret) {