
Commit 4786b4ee authored by Linus Torvalds
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] kdump: Add crash_save_vmcoreinfo for INIT
  [IA64] Fix NUMA configuration issue
  [IA64] Itanium Spec updates
  [IA64] Untangle sync_icache_dcache() page size determination
  [IA64] arch/ia64/kernel/: use time_* macros
  [IA64] remove redundant display of free swap space in show_mem()
  [IA64] make IOMMU respect the segment boundary limits
  [IA64] kprobes: kprobe-booster for ia64
  [IA64] fix getpid and set_tid_address fast system calls for pid namespaces
  [IA64] Replace explicit jiffies tests with time_* macros.
  [IA64] use goto to jump out do/while_each_thread
  [IA64] Fix unlock ordering in smp_callin
  [IA64] pgd_offset() constfication.
  [IA64] kdump: crash.c coding style fix
  [IA64] kdump: add kdump_on_fatal_mca
  [IA64] Minimize per_cpu reservations.
  [IA64] Correct pernodesize calculation.
  [IA64] Kernel parameter for max number of concurrent global TLB purges
  [IA64] Multiple outstanding ptc.g instruction support
  [IA64] Implement smp_call_function_mask for ia64
  ...
parents 253ba4e7 71b264f8
Documentation/kernel-parameters.txt +4 −0
@@ -1362,6 +1362,10 @@ and is between 256 and 4096 characters. It is defined in the file

	nowb		[ARM]

+	nptcg=		[IA64] Override max number of concurrent global TLB
+			purges which is reported from either PAL_VM_SUMMARY or
+			SAL PALO.

	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
			one of ['zone', 'node', 'default'] can be specified
			This can be set from sysctl after boot.
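For context, boot parameters like the new nptcg= are typically consumed by an early_param() handler at early boot. A minimal sketch of such a handler (illustrative names only, not necessarily how arch/ia64 wires this up):

static int nptcg_from_cmdline;	/* illustrative: holds the override, 0 = unset */

static int __init set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);	/* parse the integer after "nptcg=" */
	nptcg_from_cmdline = value;	/* would take precedence over PAL/SAL */
	return 0;
}
early_param("nptcg", set_nptcg);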
arch/ia64/Kconfig +14 −0
@@ -283,6 +283,17 @@ config FORCE_MAX_ZONEORDER
	default "17" if HUGETLB_PAGE
	default "11"

+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	default n
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.
+	  If in doubt, say N here.

config SMP
	bool "Symmetric multi-processing support"
	help
@@ -611,6 +622,9 @@ config IRQ_PER_CPU
	bool
	default y

+config IOMMU_HELPER
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)

source "arch/ia64/hp/sim/Kconfig"

source "arch/ia64/Kconfig.debug"
arch/ia64/hp/common/sba_iommu.c +39 −17
@@ -35,6 +35,7 @@
#include <linux/nodemask.h>
#include <linux/bitops.h>         /* hweight64() */
#include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
	return order;
}

+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
+				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
@@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
-				if(0 == ((*res_ptr) & mask)) {
+				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+				ret = iommu_is_span_boundary(tpide, bits_wanted,
+							     shift,
+							     boundary_size);
+				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;     /* mark resources busy! */
-					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-					pide <<= 3;	/* convert to bit address */
-					pide += bitshiftcnt;
+					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift, boundary_size);
+			if (ret)
+				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
+			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
@@ -605,7 +627,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 * resource bit map.
 */
static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

-			pide = sba_search_bitmap(ioc, pages_needed, 0);
+			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3)))
				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
				      ioc->ioc_hpa);
@@ -936,7 +958,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);

	iovp = (dma_addr_t) pide << iovp_shift;

@@ -1373,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
			| dma_offset);
		n_mappings++;
	}
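The new test at the heart of this change is iommu_is_span_boundary(): a candidate run of IO pages is skipped when it would straddle the device's DMA segment boundary. The check reduces to roughly the following (a paraphrase of the lib/iommu-helper.c helper, assuming boundary_size is a power of two; dma_get_seg_boundary() returns a 2^n - 1 style mask, so mask + 1 is one):

/* Would 'nr' pages starting at page index 'index' cross a boundary?
 * 'shift' biases the index into bus address space (ioc->ibase pages here). */
static int crosses_segment_boundary(unsigned long index, unsigned long nr,
				    unsigned long shift,
				    unsigned long boundary_size)
{
	unsigned long offset = (shift + index) & (boundary_size - 1);

	return offset + nr > boundary_size;	/* nonzero: reject this pide */
}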
arch/ia64/ia32/elfcore32.h +13 −1
@@ -30,7 +30,19 @@ struct elf_siginfo
	int	si_errno;			/* errno */
};

-#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Hacks are here since types between compat_timeval (= pair of s32) and
+ * ia64-native timeval (= pair of s64) are not compatible, at least a file
+ * arch/ia64/ia32/../../../fs/binfmt_elf.c will get warnings from compiler on
+ * use of cputime_to_timeval(), which usually an alias of jiffies_to_timeval().
+ */
+#define cputime_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
+#else
+#define jiffies_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
+#endif
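The divisor swap is the point of the #ifdef: with VIRT_CPU_ACCOUNTING enabled, cputime values on ia64 are nanosecond counts rather than jiffies, so converting to whole seconds divides by NSEC_PER_SEC instead of HZ. With example values only (not kernel code; EX_HZ is an arbitrary tick rate for illustration):

/* The same 3 s of CPU time expressed in each unit system: */
#define EX_HZ		250
#define EX_NSEC_PER_SEC	1000000000LL

long long as_jiffies = 3 * EX_HZ;		/* 750;  /EX_HZ           -> 3 s */
long long as_cputime = 3 * EX_NSEC_PER_SEC;	/* 3e9;  /EX_NSEC_PER_SEC -> 3 s */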

struct elf_prstatus
{
arch/ia64/ia32/sys_ia32.c +624 −25
@@ -38,6 +38,7 @@
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/capability.h>
@@ -2387,16 +2388,45 @@ get_free_idx (void)
	return -ESRCH;
}

+static void set_tls_desc(struct task_struct *p, int idx,
+		const struct ia32_user_desc *info, int n)
+{
+	struct thread_struct *t = &p->thread;
+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+	int cpu;
+
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+
+	while (n-- > 0) {
+		if (LDT_empty(info)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(info);
+			desc->b = LDT_entry_b(info);
+		}
+
+		++info;
+		++desc;
+	}
+
+	if (t == &current->thread)
+		load_TLS(t, cpu);
+
+	put_cpu();
+}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
-	struct thread_struct *t = &current->thread;
	struct ia32_user_desc info;
-	struct desc_struct *desc;
-	int cpu, idx;
+	int idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
@@ -2416,18 +2446,7 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	cpu = smp_processor_id();
-
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-	load_TLS(t, cpu);
+	set_tls_desc(current, idx, &info, 1);
	return 0;
}

@@ -2451,6 +2470,20 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

+static void fill_user_desc(struct ia32_user_desc *info, int idx,
+		const struct desc_struct *desc)
+{
+	info->entry_number = idx;
+	info->base_addr = GET_BASE(desc);
+	info->limit = GET_LIMIT(desc);
+	info->seg_32bit = GET_32BIT(desc);
+	info->contents = GET_CONTENTS(desc);
+	info->read_exec_only = !GET_WRITABLE(desc);
+	info->limit_in_pages = GET_LIMIT_PAGES(desc);
+	info->seg_not_present = !GET_PRESENT(desc);
+	info->useable = GET_USEABLE(desc);
+}

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
@@ -2464,22 +2497,588 @@ sys32_get_thread_area (struct ia32_user_desc __user *u_info)
		return -EINVAL;

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
+	fill_user_desc(&info, idx, desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static void getfpreg(struct task_struct *task, int regno, int *val)
{
	switch (regno / sizeof(int)) {
	case 0:
		*val = task->thread.fcr & 0xffff;
		break;
	case 1:
		*val = task->thread.fsr & 0xffff;
		break;
	case 2:
		*val = (task->thread.fsr>>16) & 0xffff;
		break;
	case 3:
		*val = task->thread.fir;
		break;
	case 4:
		*val = (task->thread.fir>>32) & 0xffff;
		break;
	case 5:
		*val = task->thread.fdr;
		break;
	case 6:
		*val = (task->thread.fdr >> 32) & 0xffff;
		break;
	}
}

static void setfpreg(struct task_struct *task, int regno, int val)
{
	switch (regno / sizeof(int)) {
	case 0:
		task->thread.fcr = (task->thread.fcr & (~0x1f3f))
			| (val & 0x1f3f);
		break;
	case 1:
		task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
		break;
	case 2:
		task->thread.fsr = (task->thread.fsr & (~0xffff0000))
			| (val << 16);
		break;
	case 3:
		task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
		break;
	case 5:
		task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
		break;
	}
}

static void access_fpreg_ia32(int regno, void *reg,
		struct pt_regs *pt, struct switch_stack *sw,
		int tos, int write)
{
	void *f;

	if ((regno += tos) >= 8)
		regno -= 8;
	if (regno < 4)
		f = &pt->f8 + regno;
	else if (regno <= 7)
		f = &sw->f12 + (regno - 4);
	else {
		printk(KERN_ERR "regno must be less than 7 \n");
		 return;
	}

	if (write)
		memcpy(f, reg, sizeof(struct _fpreg_ia32));
	else
		memcpy(reg, f, sizeof(struct _fpreg_ia32));
}

static void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	struct pt_regs *pt;
	int start, end, tos;
	char buf[80];

	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
		return;
	if (dst->pos < 7 * sizeof(int)) {
		end = min((dst->pos + dst->count),
			(unsigned int)(7 * sizeof(int)));
		for (start = dst->pos; start < end; start += sizeof(int))
			getfpreg(task, start, (int *)(buf + start));
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
				0, 7 * sizeof(int));
		if (dst->ret || dst->count == 0)
			return;
	}
	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
		pt = task_pt_regs(task);
		tos = (task->thread.fsr >> 11) & 7;
		end = min(dst->pos + dst->count,
			(unsigned int)(sizeof(struct ia32_user_i387_struct)));
		start = (dst->pos - 7 * sizeof(int)) /
			sizeof(struct _fpreg_ia32);
		end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
		for (; start < end; start++)
			access_fpreg_ia32(start,
				(struct _fpreg_ia32 *)buf + start,
				pt, info->sw, tos, 0);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				buf, 7 * sizeof(int),
				sizeof(struct ia32_user_i387_struct));
		if (dst->ret || dst->count == 0)
			return;
	}
}

static void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	struct pt_regs *pt;
	char buf[80];
	int end, start, tos;

	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
		return;

	if (dst->pos < 7 * sizeof(int)) {
		start = dst->pos;
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, buf,
				0, 7 * sizeof(int));
		if (dst->ret)
			return;
		for (; start < dst->pos; start += sizeof(int))
			setfpreg(task, start, *((int *)(buf + start)));
		if (dst->count == 0)
			return;
	}
	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
		start = (dst->pos - 7 * sizeof(int)) /
			sizeof(struct _fpreg_ia32);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf,
				buf, 7 * sizeof(int),
				sizeof(struct ia32_user_i387_struct));
		if (dst->ret)
			return;
		pt = task_pt_regs(task);
		tos = (task->thread.fsr >> 11) & 7;
		end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
		for (; start < end; start++)
			access_fpreg_ia32(start,
				(struct _fpreg_ia32 *)buf + start,
				pt, info->sw, tos, 1);
		if (dst->count == 0)
			return;
	}
}

#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
{
	int min_val;

	min_val = min(end, OFFSET(fop));
	while (start < min_val) {
		if (start == OFFSET(cwd))
			*((short *)buf) = task->thread.fcr & 0xffff;
		else if (start == OFFSET(swd))
			*((short *)buf) = task->thread.fsr & 0xffff;
		else if (start == OFFSET(twd))
			*((short *)buf) = (task->thread.fsr>>16) & 0xffff;
		buf += 2;
		start += 2;
	}
	/* skip fop element */
	if (start == OFFSET(fop)) {
		start += 2;
		buf += 2;
	}
	while (start < end) {
		if (start == OFFSET(fip))
			*((int *)buf) = task->thread.fir;
		else if (start == OFFSET(fcs))
			*((int *)buf) = (task->thread.fir>>32) & 0xffff;
		else if (start == OFFSET(foo))
			*((int *)buf) = task->thread.fdr;
		else if (start == OFFSET(fos))
			*((int *)buf) = (task->thread.fdr>>32) & 0xffff;
		else if (start == OFFSET(mxcsr))
			*((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
					 | ((task->thread.fsr>>32) & 0x3f);
		buf += 4;
		start += 4;
	}
}

static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
{
	int min_val, num32;
	short num;
	unsigned long num64;

	min_val = min(end, OFFSET(fop));
	while (start < min_val) {
		num = *((short *)buf);
		if (start == OFFSET(cwd)) {
			task->thread.fcr = (task->thread.fcr & (~0x1f3f))
						| (num & 0x1f3f);
		} else if (start == OFFSET(swd)) {
			task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
		} else if (start == OFFSET(twd)) {
			task->thread.fsr = (task->thread.fsr & (~0xffff0000))
				| (((int)num) << 16);
		}
		buf += 2;
		start += 2;
	}
	/* skip fop element */
	if (start == OFFSET(fop)) {
		start += 2;
		buf += 2;
	}
	while (start < end) {
		num32 = *((int *)buf);
		if (start == OFFSET(fip))
			task->thread.fir = (task->thread.fir & (~0xffffffff))
						 | num32;
		else if (start == OFFSET(foo))
			task->thread.fdr = (task->thread.fdr & (~0xffffffff))
						 | num32;
		else if (start == OFFSET(mxcsr)) {
			num64 = num32 & 0xff10;
			task->thread.fcr = (task->thread.fcr &
				(~0xff1000000000UL)) | (num64<<32);
			num64 = num32 & 0x3f;
			task->thread.fsr = (task->thread.fsr &
				(~0x3f00000000UL)) | (num64<<32);
		}
		buf += 4;
		start += 4;
	}
}

static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	struct pt_regs *pt;
	char buf[128];
	int start, end, tos;

	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
		return;
	if (dst->pos < OFFSET(st_space[0])) {
		end = min(dst->pos + dst->count, (unsigned int)32);
		getfpxreg(task, dst->pos, end, buf);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
				0, OFFSET(st_space[0]));
		if (dst->ret || dst->count == 0)
			return;
	}
	if (dst->pos < OFFSET(xmm_space[0])) {
		pt = task_pt_regs(task);
		tos = (task->thread.fsr >> 11) & 7;
		end = min(dst->pos + dst->count,
				(unsigned int)OFFSET(xmm_space[0]));
		start = (dst->pos - OFFSET(st_space[0])) / 16;
		end = (end - OFFSET(st_space[0])) / 16;
		for (; start < end; start++)
			access_fpreg_ia32(start, buf + 16 * start, pt,
						info->sw, tos, 0);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
		if (dst->ret || dst->count == 0)
			return;
	}
	if (dst->pos < OFFSET(padding[0]))
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&info->sw->f16, OFFSET(xmm_space[0]),
				OFFSET(padding[0]));
}

static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	char buf[128];
	int start, end;

	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
		return;

	if (dst->pos < OFFSET(st_space[0])) {
		start = dst->pos;
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf,
				buf, 0, OFFSET(st_space[0]));
		if (dst->ret)
			return;
		setfpxreg(task, start, dst->pos, buf);
		if (dst->count == 0)
			return;
	}
	if (dst->pos < OFFSET(xmm_space[0])) {
		struct pt_regs *pt;
		int tos;
		pt = task_pt_regs(task);
		tos = (task->thread.fsr >> 11) & 7;
		start = (dst->pos - OFFSET(st_space[0])) / 16;
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf,
				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
		if (dst->ret)
			return;
		end = (dst->pos - OFFSET(st_space[0])) / 16;
		for (; start < end; start++)
			access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
						 tos, 1);
		if (dst->count == 0)
			return;
	}
	if (dst->pos < OFFSET(padding[0]))
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf,
				&info->sw->f16, OFFSET(xmm_space[0]),
				 OFFSET(padding[0]));
}
#undef OFFSET

static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
		struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
		.pos = pos, .count = count,
		.u.set = { .kbuf = kbuf, .ubuf = ubuf },
		.ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int ia32_fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int ia32_fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int ia32_fpxregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpxregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int ia32_fpxregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpxregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int ia32_genregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		u32 *kp = kbuf;
		while (count > 0) {
			*kp++ = getreg(target, pos);
			pos += 4;
			count -= 4;
		}
	} else {
		u32 __user *up = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), up++))
				return -EFAULT;
			pos += 4;
			count -= 4;
		}
	}
	return 0;
}

static int ia32_genregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const u32 *kp = kbuf;
		while (!ret && count > 0) {
			putreg(target, pos, *kp++);
			pos += 4;
			count -= 4;
		}
	} else {
		const u32 __user *up = ubuf;
		u32 val;
		while (!ret && count > 0) {
			ret = __get_user(val, up++);
			if (!ret)
				putreg(target, pos, val);
			pos += 4;
			count -= 4;
		}
	}
	return ret;
}

static int ia32_tls_active(struct task_struct *target,
		const struct user_regset *regset)
{
	struct thread_struct *t = &target->thread;
	int n = GDT_ENTRY_TLS_ENTRIES;
	while (n > 0 && desc_empty(&t->tls_array[n -1]))
		--n;
	return n;
}

static int ia32_tls_get(struct task_struct *target,
		const struct user_regset *regset, unsigned int pos,
		unsigned int count, void *kbuf, void __user *ubuf)
{
	const struct desc_struct *tls;

	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
			(pos % sizeof(struct ia32_user_desc)) != 0 ||
			(count % sizeof(struct ia32_user_desc)) != 0)
		return -EINVAL;

	pos /= sizeof(struct ia32_user_desc);
	count /= sizeof(struct ia32_user_desc);

	tls = &target->thread.tls_array[pos];

	if (kbuf) {
		struct ia32_user_desc *info = kbuf;
		while (count-- > 0)
			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
					tls++);
	} else {
		struct ia32_user_desc __user *u_info = ubuf;
		while (count-- > 0) {
			struct ia32_user_desc info;
			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
			if (__copy_to_user(u_info++, &info, sizeof(info)))
				return -EFAULT;
		}
	}

	return 0;
}

static int ia32_tls_set(struct task_struct *target,
		const struct user_regset *regset, unsigned int pos,
		unsigned int count, const void *kbuf, const void __user *ubuf)
{
	struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
	const struct ia32_user_desc *info;

	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
			(pos % sizeof(struct ia32_user_desc)) != 0 ||
			(count % sizeof(struct ia32_user_desc)) != 0)
		return -EINVAL;

	if (kbuf)
		info = kbuf;
	else if (__copy_from_user(infobuf, ubuf, count))
		return -EFAULT;
	else
		info = infobuf;

	set_tls_desc(target,
		GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
		info, count / sizeof(struct ia32_user_desc));

	return 0;
}

/*
 * This should match arch/i386/kernel/ptrace.c:native_regsets.
 * XXX ioperm? vm86?
 */
static const struct user_regset ia32_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32)/4,
		.size = 4, .align = 4,
		.get = ia32_genregs_get, .set = ia32_genregs_set
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct ia32_user_i387_struct) / 4,
		.size = 4, .align = 4,
		.get = ia32_fpregs_get, .set = ia32_fpregs_set
	},
	{
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct ia32_user_fxsr_struct) / 4,
		.size = 4, .align = 4,
		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
	},
	{
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES,
		.bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct ia32_user_desc),
		.align = sizeof(struct ia32_user_desc),
		.active = ia32_tls_active,
		.get = ia32_tls_get, .set = ia32_tls_set,
	},
};

const struct user_regset_view user_ia32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
};

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 
			__u32 len_low, __u32 len_high, int advice)
{ 
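For context on how a view like user_ia32_view is consumed: the regset core (for instance the ELF core-dump writer in fs/binfmt_elf.c) walks view->regsets, asks ->active() where provided how many entries are worth dumping, and pulls register data out through ->get(). A simplified sketch of that consumer side (illustrative only; real callers size a buffer per regset and wrap the result in an ELF note tagged with core_note_type):

#include <linux/regset.h>

static int dump_regsets(struct task_struct *task,
			const struct user_regset_view *view, void *buf)
{
	unsigned int i;

	for (i = 0; i < view->n; i++) {
		const struct user_regset *rs = &view->regsets[i];

		/* e.g. ia32_tls_active: zero live entries -> skip the note */
		if (rs->active && rs->active(task, rs) <= 0)
			continue;

		if (rs->get(task, rs, 0, rs->n * rs->size, buf, NULL))
			return -EIO;
		/* ... emit 'buf' as an ELF note of type rs->core_note_type ... */
	}
	return 0;
}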