Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 842d223f authored by Linus Torvalds
Browse files

Merge branch 'akpm' (fixes from Andrew)

Merge misc fixes from Andrew Morton:

 - A bunch of fixes

 - Finish off the idr API conversions before someone starts to use the
   old interfaces again.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  idr: idr_alloc() shouldn't trigger lowmem warning when preloaded
  UAPI: fix endianness conditionals in M32R's asm/stat.h
  UAPI: fix endianness conditionals in linux/raid/md_p.h
  UAPI: fix endianness conditionals in linux/acct.h
  UAPI: fix endianness conditionals in linux/aio_abi.h
  decompressors: fix typo "POWERPC"
  mm/fremap.c: fix oops on error path
  idr: deprecate idr_pre_get() and idr_get_new[_above]()
  tidspbridge: convert to idr_alloc()
  zcache: convert to idr_alloc()
  mlx4: remove leftover idr_pre_get() call
  workqueue: convert to idr_alloc()
  nfsd: convert to idr_alloc()
  nfsd: remove unused get_new_stid()
  kernel/signal.c: use __ARCH_HAS_SA_RESTORER instead of SA_RESTORER
  signal: always clear sa_restorer on execve
  mm: remove_memory(): fix end_pfn setting
  include/linux/res_counter.h needs errno.h
parents ad8395e1 59bfbcf0
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -63,10 +63,10 @@ struct stat64 {
	long long	st_size;
	unsigned long	st_blksize;

#if defined(__BIG_ENDIAN)
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
	unsigned long	__pad4;		/* future possible st_blocks high bits */
	unsigned long	st_blocks;	/* Number 512-byte blocks allocated. */
#elif defined(__LITTLE_ENDIAN)
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
	unsigned long	st_blocks;	/* Number 512-byte blocks allocated. */
	unsigned long	__pad4;		/* future possible st_blocks high bits */
#else
+0 −1
Original line number Diff line number Diff line
@@ -362,7 +362,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}

/* slave = -1 ==> all slaves */
+26 −44
Original line number Diff line number Diff line
@@ -76,37 +76,28 @@ int drv_insert_node_res_element(void *hnode, void *node_resource,
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}
	if (!*node_res_obj)
		return -ENOMEM;

	(*node_res_obj)->node = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
	retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
	if (retval >= 0) {
		(*node_res_obj)->id = retval;
		return 0;
	}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
	kfree(*node_res_obj);

	return status;
	if (retval == -ENOSPC) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		return -EFAULT;
	} else {
		pr_err("%s: OUT OF MEMORY\n", __func__);
		return -ENOMEM;
	}
}

/* Release all Node resources and its context
@@ -201,35 +192,26 @@ int drv_proc_insert_strm_res_element(void *stream_obj,
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	if (*pstrm_res == NULL)
		return -EFAULT;

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
	retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
	if (retval >= 0) {
		(*pstrm_res)->id = retval;
		return 0;
	}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	}
	if (retval) {
	if (retval == -ENOSPC) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
		return -EPERM;
	} else {
		pr_err("%s: OUT OF MEMORY\n", __func__);
		return -ENOMEM;
	}

func_end:
	return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
+10 −15
Original line number Diff line number Diff line
@@ -300,27 +300,22 @@ static u8 r2net_num_from_nn(struct r2net_node *nn)

static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
{
	int ret = 0;
	int ret;

	do {
		if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
			ret = -EAGAIN;
			break;
		}
	spin_lock(&nn->nn_lock);
		ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
		if (ret == 0)
			list_add_tail(&nsw->ns_node_item,
				      &nn->nn_status_list);
	ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
	if (ret >= 0) {
		nsw->ns_id = ret;
		list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
	}
	spin_unlock(&nn->nn_lock);
	} while (ret == -EAGAIN);

	if (ret == 0)  {
	if (ret >= 0) {
		init_waitqueue_head(&nsw->ns_wq);
		nsw->ns_sys_status = R2NET_ERR_NONE;
		nsw->ns_status = 0;
		return 0;
	}

	return ret;
}

+2 −34
Original line number Diff line number Diff line
@@ -230,37 +230,6 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
		__nfs4_file_put_access(fp, oflag);
}

/*
 * Install @stid in its client's stateid idr and return the new id.
 * Preallocation happened in nfs4_alloc_stateid(), and the state lock
 * keeps anyone else from consuming that reserve, so the insert is
 * expected to succeed (hence the WARN rather than error handling).
 */
static inline int get_new_stid(struct nfs4_stid *stid)
{
	/*
	 * Lowest id we are willing to hand out next.  Kept monotonically
	 * increasing (mod INT_MAX) — a trick borrowed from the sctp code —
	 * so a reused stateid value reappears as late as possible.  With
	 * 4.1 reuse is believed harmless; with 4.0 a stray write
	 * retransmission might otherwise be wrongly accepted.
	 */
	static int min_stateid = 0;
	struct idr *stateids = &stid->sc_client->cl_stateids;
	int id;
	int err;

	err = idr_get_new_above(stateids, stid, min_stateid, &id);
	/* See the preallocation note above: failure here is a bug. */
	WARN_ON_ONCE(err);

	if (id + 1 == INT_MAX)
		min_stateid = 0;
	else
		min_stateid = id + 1;

	return id;
}

static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
kmem_cache *slab)
{
@@ -273,9 +242,8 @@ kmem_cache *slab)
	if (!stid)
		return NULL;

	if (!idr_pre_get(stateids, GFP_KERNEL))
		goto out_free;
	if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
	new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL);
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_type = 0;
Loading