
Commit ec8deffa authored by Linus Torvalds
* 'x86-v28-for-linus-phase2-B' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (27 commits)
  x86, cpa: make the kernel physical mapping initialization a two pass sequence, fix
  x86, pat: cleanups
  x86: fix pagetable init 64-bit breakage
  x86: track memtype for RAM in page struct
  x86, cpa: srlz cpa(), global flush tlb after splitting big page and before doing cpa
  x86, cpa: remove cpa pool code
  x86, cpa: no need to check alias for __set_pages_p/__set_pages_np
  x86, cpa: dont use large pages for kernel identity mapping with DEBUG_PAGEALLOC
  x86, cpa: make the kernel physical mapping initialization a two pass sequence
  x86, cpa: remove USER permission from the very early identity mapping attribute
  x86, cpa: rename PTE attribute macros for kernel direct mapping in early boot
  x86: make sure the CPA test code's use of _PAGE_UNUSED1 is obvious
  linux-next: fix x86 tree build failure
  x86: have set_memory_array_{uc,wb} coalesce memtypes, fix
  agp: enable optimized agp_alloc_pages methods
  x86: have set_memory_array_{uc,wb} coalesce memtypes.
  x86: {reverve,free}_memtype() take a physical address
  x86: fix pageattr-test
  agp: add agp_generic_destroy_pages()
  agp: generic_alloc_pages()
  ...
parents 7cc4e87f 3dd392a4
arch/x86/kernel/head_32.S  +15 −19
@@ -172,10 +172,6 @@ num_subarch_entries = (. - subarch_entries) / 4
  *
  * Note that the stack is not yet set up!
  */
-#define PTE_ATTR	0x007		/* PRESENT+RW+USER */
-#define PDE_ATTR	0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_ATTR	0x001		/* PRESENT (no other attributes) */
-
 default_entry:
 #ifdef CONFIG_X86_PAE
 
@@ -196,9 +192,9 @@ default_entry:
 	movl $pa(pg0), %edi
 	movl %edi, pa(init_pg_tables_start)
 	movl $pa(swapper_pg_pmd), %edx
-	movl $PTE_ATTR, %eax
+	movl $PTE_IDENT_ATTR, %eax
 10:
-	leal PDE_ATTR(%edi),%ecx		/* Create PMD entry */
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
 	movl %ecx,(%edx)			/* Store PMD entry */
 						/* Upper half already zero */
 	addl $8,%edx
@@ -215,7 +211,7 @@ default_entry:
 	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
 	 * bytes beyond the end of our own page tables.
 	 */
-	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
+	leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
 	cmpl %ebp,%eax
 	jb 10b
 1:
@@ -224,7 +220,7 @@ default_entry:
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
 	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
 #else	/* Not PAE */

@@ -233,9 +229,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	movl $pa(pg0), %edi
 	movl %edi, pa(init_pg_tables_start)
 	movl $pa(swapper_pg_dir), %edx
-	movl $PTE_ATTR, %eax
+	movl $PTE_IDENT_ATTR, %eax
 10:
-	leal PDE_ATTR(%edi),%ecx		/* Create PDE entry */
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
 	movl %ecx,(%edx)			/* Store identity PDE entry */
 	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
 	addl $4,%edx
@@ -249,7 +245,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	 * bytes beyond the end of our own page tables; the +0x007 is
 	 * the attribute bits
 	 */
-	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
+	leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
 	cmpl %ebp,%eax
 	jb 10b
 	movl %edi,pa(init_pg_tables_end)
@@ -257,7 +253,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
 	movl %eax,pa(swapper_pg_dir+0xffc)
 #endif
 	jmp 3f
@@ -634,19 +630,19 @@ ENTRY(empty_zero_page)
 	/* Page-aligned for the benefit of paravirt? */
 	.align PAGE_SIZE_asm
 ENTRY(swapper_pg_dir)
-	.long	pa(swapper_pg_pmd+PGD_ATTR),0		/* low identity map */
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
 # if KPMDS == 3
-	.long	pa(swapper_pg_pmd+PGD_ATTR),0
-	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
-	.long	pa(swapper_pg_pmd+PGD_ATTR+0x2000),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
 # elif KPMDS == 2
 	.long	0,0
-	.long	pa(swapper_pg_pmd+PGD_ATTR),0
-	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
 # elif KPMDS == 1
 	.long	0,0
 	.long	0,0
-	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
 # else
 #  error "Kernel PMDs should be 1, 2 or 3"
 # endif
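[Editor's note] The three defines removed above do not disappear: per the "rename PTE attribute macros" commit in the list, they move next to the other PTE attribute definitions under the new *_IDENT_ATTR names. A standalone sketch of the resulting bit patterns follows; the _PAGE_* values are the architectural x86 page-table bits, but whether the PDE value keeps the USER bit (0x067) or drops it (0x063) after the "remove USER permission" patch in this same series is an assumption here, so treat the exact numbers as illustrative.

/* sketch.c - what the renamed early-boot attribute macros decode to.
 * Standalone, not kernel code; compile with any C compiler. The PDE
 * value shown assumes USER has been dropped (see note above).
 */
#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040

#define PTE_IDENT_ATTR	(_PAGE_PRESENT | _PAGE_RW)		/* 0x003 */
#define PDE_IDENT_ATTR	(PTE_IDENT_ATTR | _PAGE_ACCESSED | \
			 _PAGE_DIRTY)				/* 0x063 */
#define PGD_IDENT_ATTR	_PAGE_PRESENT				/* 0x001 */

int main(void)
{
	printf("PTE=%#x PDE=%#x PGD=%#x\n",
	       PTE_IDENT_ATTR, PDE_IDENT_ATTR, PGD_IDENT_ATTR);
	return 0;
}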
arch/x86/kernel/head_64.S  +2 −2
@@ -110,7 +110,7 @@ startup_64:
 	movq	%rdi, %rax
 	shrq	$PMD_SHIFT, %rax
 	andq	$(PTRS_PER_PMD - 1), %rax
-	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
+	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
 	leaq	level2_spare_pgt(%rip), %rbx
 	movq	%rdx, 0(%rbx, %rax, 8)
 ident_complete:
@@ -374,7 +374,7 @@ NEXT_PAGE(level2_ident_pgt)
 	/* Since I easily can, map the first 1G.
 	 * Don't set NX because code runs from these pages.
 	 */
-	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 
 NEXT_PAGE(level2_kernel_pgt)
 	/*
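[Editor's note] On 64-bit the early identity mapping is already built with the final kernel attributes, so the new "ident" name is presumably a plain alias rather than a different value. A hypothetical one-line definition, an assumption not shown in this diff:

/* assumed definition, mirroring the 32-bit rename: */
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC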
arch/x86/mm/init_32.c  +74 −10
@@ -195,11 +195,30 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned pages_2m = 0, pages_4k = 0;
+	unsigned pages_2m, pages_4k;
+	int mapping_iter;
+
+	/*
+	 * First iteration will setup identity mapping using large/small pages
+	 * based on use_pse, with other attributes same as set by
+	 * the early code in head_32.S
+	 *
+	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
+	 * as desired for the kernel identity mapping.
+	 *
+	 * This two pass mechanism conforms to the TLB app note which says:
+	 *
+	 *     "Software should not write to a paging-structure entry in a way
+	 *      that would change, for any linear address, both the page size
+	 *      and either the page frame or attributes."
+	 */
+	mapping_iter = 1;
 
 	if (!cpu_has_pse)
 		use_pse = 0;
 
+repeat:
+	pages_2m = pages_4k = 0;
 	pfn = start_pfn;
 	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
@@ -225,6 +244,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			if (use_pse) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
+				/*
+				 * first pass will use the same initial
+				 * identity mapping attribute + _PAGE_PSE.
+				 */
+				pgprot_t init_prot =
+					__pgprot(PTE_IDENT_ATTR |
+						 _PAGE_PSE);
 
 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 					PAGE_OFFSET + PAGE_SIZE-1;
@@ -234,6 +260,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				pages_2m++;
-				set_pmd(pmd, pfn_pmd(pfn, prot));
+				if (mapping_iter == 1)
+					set_pmd(pmd, pfn_pmd(pfn, init_prot));
+				else
+					set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
@@ -246,17 +275,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
+				/*
+				 * first pass will use the same initial
+				 * identity mapping attribute.
+				 */
+				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
 				if (is_kernel_text(addr))
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
-				set_pte(pte, pfn_pte(pfn, prot));
+				if (mapping_iter == 1)
+					set_pte(pte, pfn_pte(pfn, init_prot));
+				else
+					set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
 	}
-	update_page_count(PG_LEVEL_2M, pages_2m);
-	update_page_count(PG_LEVEL_4K, pages_4k);
+	if (mapping_iter == 1) {
+		/*
+		 * update direct mapping page count only in the first
+		 * iteration.
+		 */
+		update_page_count(PG_LEVEL_2M, pages_2m);
+		update_page_count(PG_LEVEL_4K, pages_4k);
+
+		/*
+		 * local global flush tlb, which will flush the previous
+		 * mappings present in both small and large page TLB's.
+		 */
+		__flush_tlb_all();
+
+		/*
+		 * Second iteration will set the actual desired PTE attributes.
+		 */
+		mapping_iter = 2;
+		goto repeat;
+	}
 }
 
 /*
@@ -719,7 +774,7 @@ void __init setup_bootmem_allocator(void)
 	after_init_bootmem = 1;
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
@@ -729,7 +784,7 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -769,12 +824,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long start_pfn, end_pfn;
 	unsigned long big_page_start;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	int use_pse = 0;
+#else
+	int use_pse = cpu_has_pse;
+#endif
 
 	/*
 	 * Find space for the kernel direct mapping tables.
 	 */
 	if (!after_init_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse);
 
 #ifdef CONFIG_X86_PAE
 	set_nx();
@@ -820,7 +885,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
 	if (start_pfn < end_pfn)
 		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-						cpu_has_pse);
+					     use_pse);
 
 	/* tail is not big page alignment ? */
 	start_pfn = end_pfn;
@@ -983,7 +1048,6 @@ void __init mem_init(void)
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 
-	cpa_init();
 	save_pg_dir();
 	zap_low_mappings();
 }
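[Editor's note] The two-pass rework in kernel_physical_mapping_init() above is the heart of the series: pass 1 writes every entry with the attributes the early boot code already used (so a write may change the page size but never size and attributes at once), __flush_tlb_all() retires the previous translations, and only then does pass 2 write the final NX/GLOBAL attributes. A minimal user-space model of that ordering; set_entry() and flush_all() are stand-ins, not the kernel's set_pte()/__flush_tlb_all(), and the attribute values are illustrative:

/* twopass.c - model of the app-note-safe update sequence. */
#include <stdio.h>

#define NENTRIES	4
#define INIT_ATTR	0x063UL		/* boot-time: PRESENT+RW+A+D */
#define FINAL_ATTR	0x163UL		/* say, the same plus GLOBAL */

static unsigned long table[NENTRIES];

static void set_entry(int i, unsigned long frame, unsigned long attr)
{
	table[i] = frame | attr;	/* one write, one entry */
}

static void flush_all(void)
{
	puts("flush: old translations (any page size) are gone");
}

int main(void)
{
	int pass, i;

	for (pass = 1; pass <= 2; pass++) {
		for (i = 0; i < NENTRIES; i++)
			set_entry(i, (unsigned long)i << 12,
				  pass == 1 ? INIT_ATTR : FINAL_ATTR);
		if (pass == 1)
			flush_all();	/* mirrors __flush_tlb_all() */
	}

	for (i = 0; i < NENTRIES; i++)
		printf("entry %d = %#lx\n", i, table[i]);
	return 0;
}

No single write changes both the page size and the frame/attributes, which is exactly what the quoted TLB application note requires.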
arch/x86/mm/init_64.c  +82 −28
@@ -271,7 +271,8 @@ static __ref void unmap_low_page(void *adr)
 }
 
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
+phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+	      pgprot_t prot)
 {
 	unsigned pages = 0;
 	unsigned long last_map_addr = end;
@@ -289,36 +290,43 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
 			break;
 		}
 
+		/*
+		 * We will re-use the existing mapping.
+		 * Xen for example has some special requirements, like mapping
+		 * pagetable pages as RO. So assume someone who pre-setup
+		 * these mappings are more intelligent.
+		 */
 		if (pte_val(*pte))
 			continue;
 
 		if (0)
 			printk("   pte=%p addr=%lx pte=%016lx\n",
 			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
-		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
-		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 		pages++;
+		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
+		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 	}
 
 	update_page_count(PG_LEVEL_4K, pages);
 
 	return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
+		pgprot_t prot)
 {
 	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
 
-	return phys_pte_init(pte, address, end);
+	return phys_pte_init(pte, address, end, prot);
 }
 
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
-			 unsigned long page_size_mask)
+	      unsigned long page_size_mask, pgprot_t prot)
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
 	unsigned long start = address;
 
 	int i = pmd_index(address);

@@ -326,6 +334,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		unsigned long pte_phys;
 		pmd_t *pmd = pmd_page + pmd_index(address);
 		pte_t *pte;
+		pgprot_t new_prot = prot;
 
 		if (address >= end) {
 			if (!after_bootmem) {
@@ -339,27 +348,40 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
-								end);
+								end, prot);
 				spin_unlock(&init_mm.page_table_lock);
+				continue;
 			}
-			/* Count entries we're using from level2_ident_pgt */
-			if (start == 0)
-				pages++;
-			continue;
+			/*
+			 * If we are ok with PG_LEVEL_2M mapping, then we will
+			 * use the existing mapping,
+			 *
+			 * Otherwise, we will split the large page mapping but
+			 * use the same existing protection bits except for
+			 * large page, so that we don't violate Intel's TLB
+			 * Application note (317080) which says, while changing
+			 * the page sizes, new and old translations should
+			 * not differ with respect to page frame and
+			 * attributes.
+			 */
+			if (page_size_mask & (1 << PG_LEVEL_2M))
+				continue;
+			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
-				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+				pfn_pte(address >> PAGE_SHIFT,
+					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
 
 		pte = alloc_low_page(&pte_phys);
-		last_map_addr = phys_pte_init(pte, address, end);
+		last_map_addr = phys_pte_init(pte, address, end, new_prot);
 		unmap_low_page(pte);
 
 		spin_lock(&init_mm.page_table_lock);
@@ -372,12 +394,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,

 static unsigned long __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
-			 unsigned long page_size_mask)
+		unsigned long page_size_mask, pgprot_t prot)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
+	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
 	__flush_tlb_all();
 	return last_map_addr;
 }
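[Editor's note] Taken together, the hunks above thread the caller's pgprot_t down the whole 64-bit mapping hierarchy. Condensed from this diff (declarations only, bodies elided):

/* signatures after this series, as introduced by the hunks above: */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot);
static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
		pgprot_t prot);
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot);
static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
		unsigned long page_size_mask, pgprot_t prot);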
@@ -394,6 +416,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
+		pgprot_t prot = PAGE_KERNEL;
 
 		if (addr >= end)
 			break;
@@ -405,11 +428,27 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		}
 
 		if (pud_val(*pud)) {
-			if (!pud_large(*pud))
+			if (!pud_large(*pud)) {
 				last_map_addr = phys_pmd_update(pud, addr, end,
-							 page_size_mask);
-			continue;
+							 page_size_mask, prot);
+				continue;
+			}
+			/*
+			 * If we are ok with PG_LEVEL_1G mapping, then we will
+			 * use the existing mapping.
+			 *
+			 * Otherwise, we will split the gbpage mapping but use
+			 * the same existing protection  bits except for large
+			 * page, so that we don't violate Intel's TLB
+			 * Application note (317080) which says, while changing
+			 * the page sizes, new and old translations should
+			 * not differ with respect to page frame and
+			 * attributes.
+			 */
+			if (page_size_mask & (1 << PG_LEVEL_1G))
+				continue;
+			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
@@ -422,7 +461,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
+		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
+					      prot);
 		unmap_low_page(pmd);
 
 		spin_lock(&init_mm.page_table_lock);
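[Editor's note] The pte_pgprot(pte_clrhuge(...)) idiom above is what keeps a split legal under the app note: clearing only _PAGE_PSE from the existing large-page entry yields the protection bits the smaller children inherit, so old and new translations differ in page size only, never in frame or attributes. A user-space model with stand-in helpers (only the low attribute bits are modeled; apart from _PAGE_PSE = 0x080 the values are illustrative):

/* split.c - model of attribute inheritance when splitting a large page. */
#include <stdio.h>

#define ADDR_MASK	(~0xfffUL)	/* simplified: low 12 bits = attrs */
#define _PAGE_PSE	0x080UL

static unsigned long pte_clrhuge(unsigned long pte)
{
	return pte & ~_PAGE_PSE;	/* drop only the large-page bit */
}

static unsigned long pte_pgprot(unsigned long pte)
{
	return pte & ~ADDR_MASK;	/* keep only the attribute bits */
}

int main(void)
{
	/* an existing large-page entry: frame | attributes | PSE */
	unsigned long huge = 0x40000000UL | 0x163UL | _PAGE_PSE;
	unsigned long prot = pte_pgprot(pte_clrhuge(huge));

	printf("inherited prot = %#lx\n", prot);	/* prints 0x163 */
	return 0;
}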
@@ -430,6 +470,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	__flush_tlb_all();
+
 	update_page_count(PG_LEVEL_1G, pages);
 
 	return last_map_addr;
@@ -446,13 +487,14 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-	if (direct_gbpages) {
+	if (use_gbpages) {
 		unsigned long extra;
 		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
 		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
@@ -460,7 +502,7 @@ static void __init find_early_table_space(unsigned long end)
 		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -528,6 +570,7 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 		pgd_populate(&init_mm, pgd, __va(pud_phys));
 		spin_unlock(&init_mm.page_table_lock);
 	}
+	__flush_tlb_all();
 
 	return last_map_addr;
 }
@@ -571,6 +614,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
+	int use_pse, use_gbpages;
 
 	printk(KERN_INFO "init_memory_mapping\n");
 
@@ -584,9 +628,21 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		init_gbpages();
 
-	if (direct_gbpages)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	use_pse = use_gbpages = 0;
+#else
+	use_pse = cpu_has_pse;
+	use_gbpages = direct_gbpages;
+#endif
+
+	if (use_gbpages)
 		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (cpu_has_pse)
+	if (use_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
 	memset(mr, 0, sizeof(mr));
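[Editor's note] page_size_mask is what the rest of init_memory_mapping() consults, so forcing use_pse and use_gbpages to zero under CONFIG_DEBUG_PAGEALLOC keeps every mapping at 4k with no further special-casing, and cpa() never has to split a large page in interrupt context. A small standalone sketch of the gating, assuming the kernel's enum pg_level ordering (PG_LEVEL_NONE first, then 4K, 2M, 1G):

/* mask.c - how the two flags gate the mapping levels. */
#include <stdio.h>

enum { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

int main(void)
{
	int use_pse = 1, use_gbpages = 0;	/* sample CPU: PSE, no gbpages */
	unsigned long page_size_mask = 0;

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	printf("page_size_mask = %#lx\n", page_size_mask);	/* 0x4 */
	return 0;
}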
@@ -647,7 +703,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
 	if (!after_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		last_map_addr = kernel_physical_mapping_init(
@@ -806,8 +862,6 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
-
-	cpa_init();
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
arch/x86/mm/ioremap.c  +19 −0
@@ -83,6 +83,25 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
+int pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		if (page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
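[Editor's note] pagerange_is_ram() backs the new per-page memtype tracking ("x86: track memtype for RAM in page struct" in the list above). Its tri-state contract: 1 means the whole range is RAM, 0 means none of it is, -1 means a mix the caller must refuse. Inside the loop, one of the two flags is set on every iteration, so they can only compare equal once both are 1. A user-space model of that contract; the page_is_ram() here is a fake backing map, whereas the real one consults the e820 table:

/* range.c - model of the pagerange_is_ram() tri-state contract. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int page_is_ram(unsigned long pagenr)
{
	return pagenr < 4;	/* pretend only the first 4 pages are RAM */
}

static int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		/* both flags set: the range mixes RAM and non-RAM */
		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

int main(void)
{
	printf("%d\n", pagerange_is_ram(0x0000, 0x4000));	/*  1: all RAM  */
	printf("%d\n", pagerange_is_ram(0x4000, 0x8000));	/*  0: none RAM */
	printf("%d\n", pagerange_is_ram(0x0000, 0x8000));	/* -1: mixed    */
	return 0;
}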