drivers/net/ethernet/mellanox/mlx4/icm.c: +12 −6

--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,13 @@
 #include "fw.h"
 
 /*
- * We allocate in page size (default 4KB on many archs) chunks to avoid high
- * order memory allocations in fragmented/high usage memory situation.
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk. Note that the chunks are not necessarily in contiguous
+ * physical memory.
  */
 enum {
-	MLX4_ICM_ALLOC_SIZE	= PAGE_SIZE,
-	MLX4_TABLE_CHUNK_SIZE	= PAGE_SIZE,
+	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
+	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
 };
 
 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	struct mlx4_icm *icm;
 	struct mlx4_icm_chunk *chunk = NULL;
 	int cur_order;
+	gfp_t mask;
 	int ret;
 
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
@@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		while (1 << cur_order > npages)
 			--cur_order;
 
+		mask = gfp_mask;
+		if (cur_order)
+			mask &= ~__GFP_DIRECT_RECLAIM;
+
 		if (coherent)
 			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
 						      &chunk->mem[chunk->npages],
-						      cur_order, gfp_mask);
+						      cur_order, mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask,
+						   cur_order, mask,
 						   dev->numa_node);
 
 		if (ret) {
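For scale: 1 << 18 bytes is 256 KB, so assuming the common 4 KB PAGE_SIZE each chunk is at most an order-6 allocation (2^6 = 64 contiguous pages). When those high-order allocations succeed, a given ICM table needs up to 64x fewer scatter/gather entries than with the page-size chunks this replaces.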
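The interesting part is the __GFP_DIRECT_RECLAIM handling: any attempt above order 0 clears that flag, so the page allocator fails fast on fragmented memory instead of stalling in reclaim/compaction, and the caller steps cur_order down and retries (each decrement halving the request) until it reaches single pages, which keep the flag and may reclaim. A minimal sketch of that pattern, using a hypothetical alloc_fallback() helper that is not part of icm.c:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper (illustration only, not in the driver) showing
 * the allocation policy above: high-order attempts must not enter
 * direct reclaim, so they fail fast under fragmentation and we retry
 * one order lower; only the final order-0 attempt may reclaim.
 */
static struct page *alloc_fallback(gfp_t gfp_mask, int *order)
{
	struct page *page;
	gfp_t mask;

	for (;;) {
		mask = gfp_mask;
		if (*order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		page = alloc_pages(mask, *order);
		if (page)
			return page;	/* got 2^(*order) contiguous pages */

		if (!*order)
			return NULL;	/* even a single page failed */
		--*order;		/* halve the request and retry */
	}
}

The same trade-off drives the design in the patch: the driver opportunistically grabs big chunks when memory is unfragmented, but degrades gracefully to single pages rather than forcing the allocator into expensive reclaim for a buffer that never needed to be physically contiguous.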