Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 60458fba authored by Aneesh Kumar K.V, committed by Michael Ellerman
Browse files

powerpc/mm: Add helpers for accessing hash translation related variables



We want to switch to allocating them runtime only when hash translation is
enabled. Add helpers so that both book3s and nohash can be adapted to
upcoming change easily.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 4f40b15f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -657,8 +657,8 @@ extern void slb_set_size(u16 size);

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->context.slb_addr_limit >> 41)

#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
+62 −1
Original line number Diff line number Diff line
@@ -139,7 +139,7 @@ typedef struct {
	struct npu_context *npu_context;

	 /* SLB page size encodings*/
	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
# ifdef CONFIG_PPC_64K_PAGES
@@ -174,6 +174,67 @@ typedef struct {
#endif
} mm_context_t;

/*
 * Accessor helpers for the hash-translation related fields of
 * mm_context_t.  Callers go through these rather than touching the
 * fields directly, so that the backing storage can later be allocated
 * at runtime only when hash translation is enabled.
 */

/* Base user page-size encoding for this context. */
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	ctx->user_psize = user_psize;
}

/* Per-slice page-size encoding array for the low slices. */
static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->low_slices_psize;
}

/* Per-slice page-size encoding array for the high (1TB) slices. */
static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->high_slices_psize;
}

/* Upper limit of the user address space covered by the slice arrays. */
static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->slb_addr_limit = limit;
}

/* Cached slice masks, one per supported page size. */
#ifdef CONFIG_PPC_64K_PAGES
static inline struct slice_mask *mm_ctx_slice_mask_64k(mm_context_t *ctx)
{
	return &ctx->mask_64k;
}
#endif

static inline struct slice_mask *mm_ctx_slice_mask_4k(mm_context_t *ctx)
{
	return &ctx->mask_4k;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline struct slice_mask *mm_ctx_slice_mask_16m(mm_context_t *ctx)
{
	return &ctx->mask_16m;
}

static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
{
	return &ctx->mask_16g;
}
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
/* Per-context subpage protection table. */
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
	return &ctx->spt;
}
#endif

/*
 * The current system page and segment sizes
 */
+50 −0
Original line number Diff line number Diff line
@@ -181,6 +181,7 @@
#ifdef CONFIG_PPC_MM_SLICES
#include <asm/nohash/32/slice.h>
#define SLICE_ARRAY_SIZE	(1 << (32 - SLICE_LOW_SHIFT - 1))
#define LOW_SLICE_ARRAY_SZ	SLICE_ARRAY_SIZE
#endif

#ifndef __ASSEMBLY__
@@ -207,6 +208,55 @@ typedef struct {
	void *pte_frag;
} mm_context_t;

#ifdef CONFIG_PPC_MM_SLICES
/*
 * Accessor helpers for the slice-related fields of mm_context_t, so
 * common slice code can use the same interface here as on book3s64
 * rather than touching the fields directly.
 */
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	ctx->user_psize = user_psize;
}

/* Per-slice page-size encoding array for the low slices. */
static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->low_slices_psize;
}

/* Per-slice page-size encoding array for the high slices. */
static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->high_slices_psize;
}

/* Upper limit of the user address space covered by the slice arrays. */
static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->slb_addr_limit = limit;
}

/* Cached slice mask for the base page size. */
static inline struct slice_mask *mm_ctx_slice_mask_base(mm_context_t *ctx)
{
	return &ctx->mask_base_psize;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Cached slice masks for the hugepage sizes. */
static inline struct slice_mask *mm_ctx_slice_mask_512k(mm_context_t *ctx)
{
	return &ctx->mask_512k;
}

static inline struct slice_mask *mm_ctx_slice_mask_8m(mm_context_t *ctx)
{
	return &ctx->mask_8m;
}
#endif
#endif /* CONFIG_PPC_MM_SLICES */

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

+6 −6
Original line number Diff line number Diff line
@@ -267,12 +267,12 @@ void copy_mm_to_paca(struct mm_struct *mm)

	get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
	VM_BUG_ON(!mm->context.slb_addr_limit);
	get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
	memcpy(&get_paca()->mm_ctx_low_slices_psize,
	       &context->low_slices_psize, sizeof(context->low_slices_psize));
	memcpy(&get_paca()->mm_ctx_high_slices_psize,
	       &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
	VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
	get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
	memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
	       LOW_SLICE_ARRAY_SZ);
	memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
	       TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
	get_paca()->mm_ctx_user_psize = context->user_psize;
	get_paca()->mm_ctx_sllp = context->sllp;
+5 −5
Original line number Diff line number Diff line
@@ -1142,7 +1142,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

@@ -1465,7 +1465,7 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
	int psize = get_slice_psize(mm, ea);

	/* We only prefault standard pages for now */
	if (unlikely(psize != mm->context.user_psize))
	if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
		return false;

	/*
@@ -1544,7 +1544,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,

	/* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
	if (mm->context.user_psize == MMU_PAGE_64K)
	if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     update_flags, ssize);
	else
@@ -1557,8 +1557,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm->context.user_psize,
				   mm->context.user_psize,
				   mm_ctx_user_psize(&mm->context),
				   mm_ctx_user_psize(&mm->context),
				   pte_val(*ptep));
out_exit:
	local_irq_restore(flags);
Loading