
Commit e4903fb5 authored by Linus Torvalds
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Nail two more simple section mismatch errors
  [IA64] fix section mismatch warnings
  [IA64] rename partial_page
  [IA64] Ensure that machvec setup takes place before serial console
  [IA64] vector-domain - fix vector_table
  [IA64] vector-domain - handle assign_irq_vector(AUTO_ASSIGN)
parents 6a28a05f cb2e0912
+4 −4
@@ -249,10 +249,10 @@ ia32_init (void)

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;

-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-						sizeof(struct partial_page),
-						0, SLAB_PANIC, NULL);
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
+					0, SLAB_PANIC, NULL);
	}
#endif
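
The hunk above only renames the slab cache and the structure behind it; the surrounding create/alloc/free pattern is unchanged. Below is a minimal sketch of that lifecycle, using only calls that appear elsewhere in this patch (kmem_cache_create() at init time, kmem_cache_alloc() and kmem_cache_free() per object). The example_* helpers are hypothetical names for illustration, and error handling is left to the SLAB_PANIC flag the real call already passes:

/* Sketch only, not part of the patch: assumes kernel context plus the
 * ia64_partial_page / ia64_partial_page_cachep names introduced above. */
struct kmem_cache *ia64_partial_page_cachep;

void example_init(void)					/* hypothetical helper */
{
	ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
					sizeof(struct ia64_partial_page),
					0, SLAB_PANIC, NULL);	/* SLAB_PANIC: no NULL check needed */
}

struct ia64_partial_page *example_alloc(unsigned int base)	/* hypothetical helper */
{
	struct ia64_partial_page *pp;

	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return NULL;
	pp->base = base;			/* caller fills in the remaining fields */
	return pp;
}

void example_free(struct ia64_partial_page *pp)			/* hypothetical helper */
{
	kmem_cache_free(ia64_partial_page_cachep, pp);
}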
+6 −6
@@ -25,8 +25,8 @@
 * partially mapped pages provide precise accounting of which 4k sub pages
 * are mapped and which ones are not, thereby improving IA-32 compatibility.
 */
-struct partial_page {
-	struct partial_page	*next; /* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next; /* linked list, sorted by address */
	struct rb_node		pp_rb;
	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
	unsigned int		base;
};

-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
					   * addressed partial page */
	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
					   * accessed partial page */
	atomic_t		pp_count; /* reference count */
};

#if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
#else
# define ia32_init_pp_list()	0
#endif
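
The comment at the top of this header describes the idea: when the ia64 kernel page (PAGE_SHIFT) is bigger than the 4K IA-32 page, one ia64_partial_page records, for a single host page, which 4K sub-pages are actually mapped. Since the largest "normal" ia64 page is 64K, at most 64K/4K = 16 bits are needed per page, so one 64-bit word (enough for 4K*64 = 256K, as the sizing comment notes) comfortably holds the bitmap. Below is a small userspace model of that bookkeeping; the constants, field name, and helper are assumptions chosen for illustration, not the kernel code:

#include <stdint.h>
#include <stdio.h>

#define IA32_PAGE_SHIFT	12			/* 4K IA-32 sub-pages */
#define PAGE_SHIFT	16			/* assume a 64K ia64 page */

static uint64_t sub_page_bitmap;		/* one bit per mapped 4K sub-page */

/* Mark [start, end) as mapped; both offsets fall within one host page. */
static void set_sub_pages(unsigned int start, unsigned int end)
{
	unsigned int first = start >> IA32_PAGE_SHIFT;
	unsigned int last = (end - 1) >> IA32_PAGE_SHIFT;
	unsigned int b;

	for (b = first; b <= last; b++)
		sub_page_bitmap |= 1ull << b;	/* b < 16 for a 64K host page */
}

int main(void)
{
	set_sub_pages(0x2000, 0x5000);		/* sub-pages 2..4 of this page */
	printf("mapped sub-pages: %#llx\n",
	       (unsigned long long)sub_page_bitmap);	/* prints 0x1c */
	return 0;
}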
+41 −40
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
	return ret;
}

-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;

/*
- * init partial_page_list.
+ * init ia64_partial_page_list.
 * return 0 means kmalloc fail.
 */
-struct partial_page_list*
+struct ia64_partial_page_list*
ia32_init_pp_list(void)
{
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;

	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
 * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
 * be used by later __ia32_insert_pp().
 */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
	struct rb_node **rb_parent)
{
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,

	while (*__rb_link) {
		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);

		if (pp->base == start) {
			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
	*rb_parent = __rb_parent;
	*pprev = NULL;
	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
	return NULL;
}

@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 * insert @pp into @ppl.
 */
static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	 struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* link list */
	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
		ppl->pp_head = pp;
		if (rb_parent)
			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
		else
			pp->next = NULL;
	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 * delete @pp from partial page list @ppl.
 */
static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
{
	if (prev) {
		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
			ppl->pp_hint = pp->next;
	}
	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
}

-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
{
	struct rb_node *prev = rb_prev(&pp->pp_rb);
	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
	else
		return NULL;
}
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;

	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
	}

	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
		__ia32_delete_pp(current->thread.ppl, pp, prev);
		pp = tmp;
	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;

@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
			return 0;
	}

-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;
	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
		return -ENOMEM;
	}

-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, size;
	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
}

static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
{
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;

	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
		pp = next;
	}

@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
}

void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
{
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;

	if (ppl && atomic_dec_and_test(&ppl->pp_count))
		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
 * Copy current->thread.ppl to ppl (already initialized).
 */
static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
{
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
	struct rb_node **rb_link, *rb_parent;

	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
	prev = NULL;

	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
}

int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+				unsigned long clone_flags)
{
	int retval = 0;

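The last two hunks also rename the task-level entry points, ia32_drop_ia64_partial_page_list() and ia32_copy_ia64_partial_page_list(). The lifetime rule is visible in the drop path above: every task referencing the list drops pp_count on exit, and only the final atomic_dec_and_test() tears the list down. Below is a userspace model of just that refcounting; the share-on-clone step is an assumption, since the copy hunk is truncated before the clone_flags handling:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ppl_model {				/* stands in for ia64_partial_page_list */
	atomic_int pp_count;			/* models atomic_t pp_count */
};

static struct ppl_model *ppl_share(struct ppl_model *ppl)
{
	atomic_fetch_add(&ppl->pp_count, 1);	/* another task now references the list */
	return ppl;
}

static void ppl_drop(struct ppl_model *ppl)
{
	/* models: if (ppl && atomic_dec_and_test(&ppl->pp_count)) __ia32_drop_pp_list(ppl) */
	if (ppl && atomic_fetch_sub(&ppl->pp_count, 1) == 1) {
		printf("last user gone, freeing the list\n");
		free(ppl);
	}
}

int main(void)
{
	struct ppl_model *ppl = malloc(sizeof(*ppl));

	if (!ppl)
		return 1;
	atomic_init(&ppl->pp_count, 1);		/* the creating task holds one reference */
	ppl_drop(ppl_share(ppl));		/* a clone comes and goes: list survives */
	ppl_drop(ppl);				/* creator exits: list is freed */
	return 0;
}
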
+3 −1
@@ -178,7 +178,7 @@ swapper_pg_dir:
halt_msg:
	stringz "Halting kernel\n"

-	.text
+	.section .text.head,"ax"

	.global start_ap

@@ -392,6 +392,8 @@ self: hint @pause
	br.sptk.many self		// endless loop
END(_start)

+	.text
+
GLOBAL_ENTRY(ia64_save_debug_regs)
	alloc r16=ar.pfs,1,0,0,0
	mov r20=ar.lc			// preserve ar.lc
+12 −14
@@ -85,8 +85,8 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};

-static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
-	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+static cpumask_t vector_table[IA64_NUM_VECTORS] = {
+	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
@@ -123,17 +123,18 @@ static inline int find_unassigned_irq(void)
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
-	int pos;
+	int pos, vector;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
-		cpus_and(mask, domain, vector_table[pos]);
+		vector = IA64_FIRST_DEVICE_VECTOR + pos;
+		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
-		return IA64_FIRST_DEVICE_VECTOR + pos;
+		return vector;
	}
	return -ENOSPC;
}
@@ -141,7 +142,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
-	int cpu, pos;
+	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	cpus_and(mask, domain, cpu_online_map);
@@ -156,8 +157,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_or(vector_table[pos], vector_table[pos], domain);
+	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

@@ -174,7 +174,7 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)

static void __clear_irq_vector(int irq)
{
-	int vector, cpu, pos;
+	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];
@@ -189,8 +189,7 @@ static void __clear_irq_vector(int irq)
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
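
The three hunks above make one change: vector_table now has a slot for every vector (IA64_NUM_VECTORS) and is indexed by the vector number itself, rather than by pos = vector - IA64_FIRST_DEVICE_VECTOR, so every reader and writer agrees on the slot regardless of where the device-vector range starts. Below is a bare-bones model of the new indexing; the constants and the single-word domain mask are stand-ins for the real cpumask_t handling:

#include <stdio.h>

#define IA64_NUM_VECTORS	256		/* table now covers every vector */
#define FIRST_DEVICE_VECTOR	0x30		/* illustrative value only */

static unsigned long vector_table[IA64_NUM_VECTORS];	/* models one cpumask_t per vector */

static void bind_vector(int vector, unsigned long domain)
{
	vector_table[vector] |= domain;		/* like cpus_or(vector_table[vector], ..., domain) */
}

static int vector_free_in(int vector, unsigned long domain)
{
	return (vector_table[vector] & domain) == 0;	/* like the cpus_and() emptiness test */
}

int main(void)
{
	int v = FIRST_DEVICE_VECTOR + 2;

	bind_vector(v, 0x1);			/* claim the vector in CPU 0's domain */
	printf("vector %#x free for CPU 0? %d\n", v, vector_free_in(v, 0x1));
	printf("vector %#x free for CPU 1? %d\n", v, vector_free_in(v, 0x2));
	return 0;
}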
@@ -212,9 +211,6 @@ assign_irq_vector (int irq)
	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
-	if (irq < 0) {
-		goto out;
-	}
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
@@ -223,6 +219,8 @@ assign_irq_vector (int irq)
	}
	if (vector < 0)
		goto out;
+	if (irq == AUTO_ASSIGN)
+		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
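
The final hunk restores assign_irq_vector(AUTO_ASSIGN): instead of bailing out on a negative irq, the function now finds a free vector first and, for AUTO_ASSIGN callers, reuses that vector number as the irq number before binding. A compressed model of that control flow follows; AUTO_ASSIGN's value and the fixed free vector are assumptions made for the example:

#include <stdio.h>

#define AUTO_ASSIGN	(-1)			/* assumed value, as in older ia64 headers */

static int find_unassigned_vector_model(void)
{
	return 0x32;				/* pretend this vector is available */
}

static int assign_irq_vector_model(int irq)
{
	int vector = find_unassigned_vector_model();

	if (vector < 0)
		return vector;			/* -ENOSPC in the real code */
	if (irq == AUTO_ASSIGN)			/* the case this patch handles */
		irq = vector;
	/* __bind_irq_vector(irq, vector, domain) would run here */
	return vector;
}

int main(void)
{
	printf("AUTO_ASSIGN request got vector %#x\n",
	       assign_irq_vector_model(AUTO_ASSIGN));
	return 0;
}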