arch/arm/mm/dma-mapping.c +58 −16

@@ -487,6 +487,15 @@ void __init dma_contiguous_remap(void)
 		struct map_desc map;
 		unsigned long addr;
 
+		/*
+		 * Make start and end PMD_SIZE aligned, observing memory
+		 * boundaries
+		 */
+		if (memblock_is_memory(start & PMD_MASK))
+			start = start & PMD_MASK;
+		if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+			end = ALIGN(end, PMD_SIZE);
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
@@ -507,8 +516,13 @@ void __init dma_contiguous_remap(void)
 		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PMD_SIZE)
-			pmd_clear(pmd_off_k(addr));
+		     addr += PMD_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_off_k(addr);
+			if (pmd_bad(*pmd))
+				pmd_clear(pmd);
+		}
 
 		flush_tlb_kernel_range(__phys_to_virt(start),
 				       __phys_to_virt(end));
@@ -648,9 +662,14 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 
 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 {
-	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
-			pgprot_writecombine(prot) :
-			pgprot_dmacoherent(prot);
+	if (attrs & DMA_ATTR_WRITE_COMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		prot = pgprot_stronglyordered(prot);
+	/* if non-consistent just pass back what was given */
+	else if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+		prot = pgprot_dmacoherent(prot);
 	return prot;
 }
@@ -1824,7 +1843,31 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+	struct scatterlist *s;
+	int i;
+	size_t ret;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_direction_to_prot(dir);
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
 }
 
 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -1875,7 +1918,15 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
 }
 
 /**
@@ -1985,9 +2036,6 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
 }
@@ -2085,9 +2133,6 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
@@ -2099,9 +2144,6 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }

arch/arm/mm/init.c +54 −4

@@ -600,6 +600,9 @@ struct section_perm {
 	pmdval_t mask;
 	pmdval_t prot;
 	pmdval_t clear;
+	pteval_t ptemask;
+	pteval_t pteprot;
+	pteval_t pteclear;
 };
 
 /* First section-aligned location at or after __start_rodata. */
@@ -613,6 +616,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_stext,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make init RW (set NX). */
 	{
@@ -621,6 +626,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_sdata,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make rodata NX (set RO in ro_perms below). */
 	{
@@ -629,6 +636,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)__init_begin,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 };
@@ -646,6 +655,8 @@ static struct section_perm ro_perms[] = {
 		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.clear	= PMD_SECT_AP_WRITE,
 #endif
+		.ptemask = ~L_PTE_RDONLY,
+		.pteprot = L_PTE_RDONLY,
 	},
 };
@@ -654,6 +665,35 @@ static struct section_perm ro_perms[] = {
  * copied into each mm). During startup, this is the init_mm. Is only
  * safe to be called with preemption disabled, as under stop_machine().
  */
+struct pte_data {
+	pteval_t mask;
+	pteval_t val;
+};
+
+static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *d)
+{
+	struct pte_data *data = d;
+	pte_t pte = *ptep;
+
+	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
+	set_pte_ext(ptep, pte, 0);
+
+	return 0;
+}
+
+static inline void pte_update(unsigned long addr, pteval_t mask,
+			      pteval_t prot, struct mm_struct *mm)
+{
+	struct pte_data data;
+
+	data.mask = mask;
+	data.val = prot;
+
+	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
 static inline void section_update(unsigned long addr, pmdval_t mask,
 				  pmdval_t prot, struct mm_struct *mm)
 {
@@ -702,11 +742,21 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
 		for (addr = perms[i].start;
 		     addr < perms[i].end;
-		     addr += SECTION_SIZE)
-			section_update(addr, perms[i].mask,
-				set ? perms[i].prot : perms[i].clear, mm);
+		     addr += SECTION_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr),
+					 addr);
+			if (pmd_bad(*pmd))
+				section_update(addr, perms[i].mask,
+					set ? perms[i].prot : perms[i].clear,
+					mm);
+			else
+				pte_update(addr, perms[i].ptemask,
+					set ? perms[i].pteprot :
+						perms[i].pteclear, mm);
+		}
 	}
 }
 
 /**
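A note on the dma_contiguous_remap() hunks: rounding start down and end up to PMD_SIZE, but only when the rounded address is still covered by memblock memory, lets the CMA region be unmapped at section granularity without touching neighbouring mappings. The pmd_bad() guard in the clearing loop relies on a property of 2-level ARM page tables; a minimal sketch of the reasoning, quoting the macro as defined in pgtable-2level.h:

/*
 * Why pmd_bad() doubles as an "is this a section mapping?" test in
 * the dma_contiguous_remap() loop, assuming 2-level ARM page tables:
 *
 *	#define pmd_bad(pmd)	(pmd_val(pmd) & 2)
 *
 * A section descriptor has bits [1:0] = 0b10 (pmd_bad() is true),
 * while a pointer to a table of small pages has 0b01. The loop
 * therefore clears only section entries and leaves PMDs that already
 * point at page tables intact.
 */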
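The __get_dma_pgprot() change stops forcing every non-write-combined allocation to pgprot_dmacoherent(): DMA_ATTR_STRONGLY_ORDERED requests a strongly-ordered mapping, and DMA_ATTR_NON_CONSISTENT passes the caller's pgprot through untouched. DMA_ATTR_STRONGLY_ORDERED, DMA_ATTR_NON_CONSISTENT and pgprot_stronglyordered() are not mainline and are assumed to be defined elsewhere in this series. A minimal sketch of how a driver hands such attrs in, using only the mainline DMA_ATTR_WRITE_COMBINE:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver helper: the attrs word given to
 * dma_alloc_attrs() is what eventually reaches __get_dma_pgprot()
 * when the CPU-side mapping is created.
 */
static void *alloc_wc_buffer(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	/* mapped with pgprot_writecombine() per the first branch above */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}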
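The rewritten arm_iommu_map_sg() no longer maps each scatterlist entry into its own IOVA window; it allocates one IOVA range sized for the whole list, maps it with a single iommu_map_sg() call, and then stamps each entry with its offset into that range. A worked example with hypothetical values:

/*
 * Three 4 KiB entries mapped at iova 0x10000 come back as one
 * contiguous DMA range:
 *
 *	entry 0: dma_address = 0x10000, dma_length = 0x3000
 *	entry 1: dma_address = 0x11000, dma_length = 0x2000
 *	entry 2: dma_address = 0x12000, dma_length = 0x1000
 *
 * dma_length counts from each entry to the end of the buffer, so
 * sg_dma_len() of the first entry gives the total size, which is
 * exactly what the new arm_iommu_unmap_sg() reads back to size its
 * single iommu_unmap()/__free_iova() call.
 */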
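On the init.c side, set_section_perms() gains a small-page path: when a kernel PMD points at a page table rather than a section, the new pte_update() walks it with apply_to_page_range() and applies the L_PTE_* equivalents of the PMD_SECT_* permission bits. The update arithmetic, worked through for the read-only rule in ro_perms:

/*
 * set (mark rodata RO):     new = (old & ~L_PTE_RDONLY) | L_PTE_RDONLY
 * clear (make it writable): new = (old & ~L_PTE_RDONLY) | 0
 *
 * The clear case uses .pteclear, which these entries leave
 * uninitialised; C aggregate initialisation zeroes it, so clearing
 * simply strips the masked bit. This mirrors the mask/prot scheme
 * section_update() already applies to PMD_SECT_* bits, at PTE
 * granularity, with flush_tlb_kernel_range() run over the section
 * afterwards.
 */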