
Commit b3967dc5 authored by Linus Torvalds
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Prefetch mmap_sem in ia64_do_page_fault()
  [IA64] Failure to resume after INIT in user space
  [IA64] Pass more data to the MCA/INIT notify_die hooks
  [IA64] always map VGA framebuffer UC, even if it supports WB
  [IA64] fix bug in ia64 __mutex_fastpath_trylock
  [IA64] for_each_possible_cpu: ia64
  [IA64] update HP CSR space discovery via ACPI
  [IA64] Wire up new syscalls {set,get}_robust_list
  [IA64] 'msg' may be used uninitialized in xpc_initiate_allocate()
  [IA64] Wire up new syscall sync_file_range()
parents cde227af 0ffe9849
arch/ia64/kernel/acpi-ext.c  +71 −72
/*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
 *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <linux/efi.h>

#include <asm/acpi-ext.h>

-struct acpi_vendor_descriptor {
-	u8 guid_id;
-	efi_guid_t guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI.  This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */

-struct acpi_vendor_info {
-	struct acpi_vendor_descriptor *descriptor;
-	u8 *data;
-	u32 length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+	.subtype = 2,
+	.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+	    0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};

-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
{
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
-	struct acpi_resource_vendor *vendor;
-	struct acpi_vendor_descriptor *descriptor;
-	u32 byte_length;
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_resource *resource;
+	struct acpi_resource_vendor_typed *vendor;

-	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
-		return AE_OK;
+	status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+		&buffer);

-	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
-	if (vendor->byte_length <= sizeof(*info->descriptor) ||
-	    descriptor->guid_id != info->descriptor->guid_id ||
-	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
-		return AE_OK;
+	resource = buffer.pointer;
+	vendor = &resource->data.vendor_typed;

-	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(byte_length);
-	if (!info->data)
-		return AE_NO_MEMORY;
+	if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+		status = AE_NOT_FOUND;
+		goto exit;
+	}

-	memcpy(info->data,
-	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
-	       byte_length);
-	info->length = byte_length;
-	return AE_CTRL_TERMINATE;
+	memcpy(base, vendor->byte_data, sizeof(*base));
+	memcpy(length, vendor->byte_data + 8, sizeof(*length));

+  exit:
+	acpi_os_free(buffer.pointer);
+	return status;
}

-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * byte_length)
+struct csr_space {
+	u64	base;
+	u64	length;
+};

+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
{
-	struct acpi_vendor_info info;
+	struct csr_space *space = data;
+	struct acpi_resource_address64 addr;
+	acpi_status status;

-	info.descriptor = id;
-	info.data = NULL;
+	status = acpi_resource_to_address64(resource, &addr);
+	if (ACPI_SUCCESS(status) &&
+	    addr.resource_type == ACPI_MEMORY_RANGE &&
+	    addr.address_length &&
+	    addr.producer_consumer == ACPI_CONSUMER) {
+		space->base = addr.minimum;
+		space->length = addr.address_length;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;		/* keep looking */
+}

-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
-			    &info);
-	if (!info.data)
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+	struct csr_space space = { 0, 0 };

+	acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+	if (!space.length)
		return AE_NOT_FOUND;

-	*data = info.data;
-	*byte_length = info.length;
+	*base = space.base;
+	*length = space.length;
	return AE_OK;
}

-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
-	.guid_id = 2,
-	.guid =
-	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
-		     0x37, 0x0e, 0xad)
-};

acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
	acpi_status status;
-	u8 *data;
-	u32 length;

-	status =
-	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);

-	if (ACPI_FAILURE(status) || length != 16)
-		return AE_NOT_FOUND;

-	memcpy(csr_base, data, sizeof(*csr_base));
-	memcpy(csr_length, data + 8, sizeof(*csr_length));
-	acpi_os_free(data);
+	status = hp_ccsr_locate(obj, csr_base, csr_length);
+	if (ACPI_SUCCESS(status))
+		return status;

-	return AE_OK;
+	return hp_crs_locate(obj, csr_base, csr_length);
}

EXPORT_SYMBOL(hp_acpi_csr_space);
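The exported interface is unchanged: hp_acpi_csr_space() still returns the base and length of a device's CSR window, now trying the HP vendor-defined resource first (hp_ccsr_locate) and falling back to a consumer-only memory descriptor in _CRS (hp_crs_locate). A minimal sketch of a hypothetical caller is shown below; the map_hp_csr() helper and its use of ioremap() are illustrative assumptions, not code from this commit.

/* Hypothetical caller sketch (not part of this commit). */
#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/acpi-ext.h>

static void __iomem *map_hp_csr(acpi_handle obj)
{
	u64 base, length;
	acpi_status status;

	status = hp_acpi_csr_space(obj, &base, &length);
	if (ACPI_FAILURE(status))
		return NULL;			/* no CSR space described */

	return ioremap(base, length);		/* map the CSR window */
}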
+3 −0
@@ -1606,5 +1606,8 @@ sys_call_table:
	data8 sys_ni_syscall			// 1295 reserved for ppoll
	data8 sys_unshare
	data8 sys_splice
+	data8 sys_set_robust_list
+	data8 sys_get_robust_list
+	data8 sys_sync_file_range		// 1300

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
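The three new entries fill the slots after sys_splice, so on ia64 set_robust_list, get_robust_list and sync_file_range become syscalls 1298, 1299 and 1300 (the trailing comment marks slot 1300). A hedged userspace sketch for invoking the newly wired sync_file_range() by number, for a libc without a wrapper; the IA64_NR_sync_file_range macro and the flag value are assumptions based on this hunk and the headers of that era, not something the diff defines.

/* Hypothetical userspace sketch (not part of this commit). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IA64_NR_sync_file_range	1300	/* slot from the table above */
#define SYNC_FILE_RANGE_WRITE	2	/* start writeback, don't wait for it */

int main(void)
{
	int fd = open("scratch.dat", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	write(fd, "hello\n", 6);

	/* sync_file_range(fd, offset, nbytes, flags) */
	if (syscall(IA64_NR_sync_file_range, fd, 0L, 6L, SYNC_FILE_RANGE_WRITE) < 0)
		perror("sync_file_range");

	close(fd);
	return 0;
}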
+21 −12
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
	 */
	ia64_sal_mc_rendez();

-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
	while (monarch_cpu != -1)
	       cpu_relax();	/* spin until monarch leaves */

-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		&sos->proc_state_param;
	int recover, cpu = smp_processor_id();
	task_t *previous_current;
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */
@@ -1031,7 +1035,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
	monarch_cpu = cpu;
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_wait_for_slaves(cpu);
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
	 * spinning in SAL does not work.
	 */
	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	}
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
	static atomic_t monarchs;
	task_t *previous_current;
	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */

+	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
		       cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		while (monarch_cpu != -1)
		       cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
	}

	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

@@ -1426,10 +1434,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
+			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
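With this change every MCA/INIT notify_die() call passes a pointer to a struct ia64_mca_notify_die (carrying sos and &monarch_cpu) in the err argument instead of 0, so die-chain subscribers can see the SAL/OS state, not just the interrupted registers. A hypothetical subscriber might recover it as sketched below; the callback, its printk and its registration are illustrative assumptions, with the die_args layout taken from the asm/kdebug.h of this era.

/* Hypothetical die-chain subscriber sketch (not code from this commit). */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>
#include <asm/mca.h>

static int my_mca_event(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct die_args *args = data;
	struct ia64_mca_notify_die *nd;

	if (val != DIE_MCA_MONARCH_ENTER)
		return NOTIFY_DONE;

	/* The handlers above now pass the payload in the err slot. */
	nd = (struct ia64_mca_notify_die *)args->err;
	printk(KERN_INFO "MCA monarch on cpu %d, PSP=%lx\n",
	       *nd->monarch_cpu, nd->sos->proc_state_param);
	return NOTIFY_DONE;
}

static struct notifier_block my_mca_nb = {
	.notifier_call	= my_mca_event,
};

/* register_die_notifier(&my_mca_nb) would hook this into the chain. */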
+5 −5
@@ -827,7 +827,7 @@ ia64_state_restore:
	ld8 r9=[temp2],16	// sal_gp
	;;
	ld8 r22=[temp1],16	// pal_min_state, virtual
-	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
+	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
	;;
	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
	ld8 r20=[temp2],16	// prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
	mov cr.iim=temp3
	mov cr.iha=temp4
	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
-	mov IA64_KR(CURRENT)=r21
+	mov IA64_KR(CURRENT)=r13
	ld8 r8=[temp1]		// os_status
	ld8 r10=[temp2]		// context

@@ -856,7 +856,7 @@ ia64_state_restore:
	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
	 * purge any existing CURRENT_STACK mapping and insert the new one.
	 *
-	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
	 * the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
	;;
	srlz.d

-	extr.u r19=r21,61,3			// r21 = prev_IA64_KR_CURRENT
+	extr.u r19=r13,61,3			// r13 = prev_IA64_KR_CURRENT
	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
	movl r21=PAGE_KERNEL			// page properties
	;;
@@ -883,7 +883,7 @@ ia64_state_restore:
(p6)	br.spnt 1f				// the dreaded cpu 0 idle task in region 5:(
	;;
	mov cr.itir=r18
-	mov cr.ifa=r21
+	mov cr.ifa=r13
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r21
+1 −1
@@ -947,7 +947,7 @@ void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
	unsigned int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
		memcpy(pcpudst + __per_cpu_offset[i], src, size);
	}
}
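percpu_modcopy() now iterates with for_each_possible_cpu(), i.e. over every CPU that may ever come online, so a module's per-CPU data is replicated even for CPUs that are currently offline. The same idiom applies to any per-CPU initialization; a small hypothetical example (the counter and helper are illustrative only, not from this commit):

/* Hypothetical example (not part of this commit). */
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void reset_my_counters(void)
{
	int cpu;

	/* cover offline-but-possible CPUs too, like percpu_modcopy() above */
	for_each_possible_cpu(cpu)
		per_cpu(my_counter, cpu) = 0;
}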