
Commit aba16dc5 authored by Linus Torvalds
Pull IDA updates from Matthew Wilcox:
 "A better IDA API:

      id = ida_alloc(ida, GFP_xxx);
      ida_free(ida, id);

  rather than the cumbersome ida_simple_get(), ida_simple_remove().

  The new IDA API is similar to ida_simple_get() but better named.  The
  internal restructuring of the IDA code removes the bitmap
  preallocation nonsense.

  I hope the net -200 lines of code is convincing"
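
As a rough sketch of the conversion pattern this series applies in the diffs below (illustrative only; example_ida and example_lock are hypothetical names, not code from this merge), the old preallocate-and-retry idiom:

	do {
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_lock);
		err = ida_get_new(&example_ida, &id);
		spin_unlock(&example_lock);
	} while (err == -EAGAIN);

becomes a single allocation that returns the id or a negative errno and needs no caller-side locking:

	id = ida_alloc(&example_ida, GFP_KERNEL);
	if (id < 0)
		return id;
	/* ... use id ... */
	ida_free(&example_ida, id);

The ida_alloc_range() and ida_alloc_max() variants bound the allocated id, as the powerpc mmu context and VAS conversions below show.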

* 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax: (29 commits)
  ida: Change ida_get_new_above to return the id
  ida: Remove old API
  test_ida: check_ida_destroy and check_ida_alloc
  test_ida: Convert check_ida_conv to new API
  test_ida: Move ida_check_max
  test_ida: Move ida_check_leaf
  idr-test: Convert ida_check_nomem to new API
  ida: Start new test_ida module
  target/iscsi: Allocate session IDs from an IDA
  iscsi target: fix session creation failure handling
  drm/vmwgfx: Convert to new IDA API
  dmaengine: Convert to new IDA API
  ppc: Convert vas ID allocation to new IDA API
  media: Convert entity ID allocation to new IDA API
  ppc: Convert mmu context allocation to new IDA API
  Convert net_namespace to new IDA API
  cb710: Convert to new IDA API
  rsxx: Convert to new IDA API
  osd: Convert to new IDA API
  sd: Convert to new IDA API
  ...
parents c4726e77 1df89519
+4 −40
@@ -26,48 +26,16 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
-static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
 static int alloc_context_id(int min_id, int max_id)
 {
-	int index, err;
-
-again:
-	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(&mmu_context_lock);
-	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
-	spin_unlock(&mmu_context_lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > max_id) {
-		spin_lock(&mmu_context_lock);
-		ida_remove(&mmu_context_ida, index);
-		spin_unlock(&mmu_context_lock);
-		return -ENOMEM;
-	}
-
-	return index;
+	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
 void hash__reserve_context_id(int id)
 {
-	int rc, result = 0;
-
-	do {
-		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-			break;
-
-		spin_lock(&mmu_context_lock);
-		rc = ida_get_new_above(&mmu_context_ida, id, &result);
-		spin_unlock(&mmu_context_lock);
-	} while (rc == -EAGAIN);
+	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
 
 	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
 }
@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)

 void __destroy_context(int context_id)
 {
-	spin_lock(&mmu_context_lock);
-	ida_remove(&mmu_context_ida, context_id);
-	spin_unlock(&mmu_context_lock);
+	ida_free(&mmu_context_ida, context_id);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);

@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
 {
 	int index, context_id;
 
-	spin_lock(&mmu_context_lock);
 	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
 		context_id = ctx->extended_id[index];
 		if (context_id)
-			ida_remove(&mmu_context_ida, context_id);
+			ida_free(&mmu_context_ida, context_id);
 	}
-	spin_unlock(&mmu_context_lock);
 }
 
 static void pte_frag_destroy(void *pte_frag)
+4 −22
@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
 	return 0;
 }
 
-static DEFINE_SPINLOCK(vas_ida_lock);
-
 static void vas_release_window_id(struct ida *ida, int winid)
 {
-	spin_lock(&vas_ida_lock);
-	ida_remove(ida, winid);
-	spin_unlock(&vas_ida_lock);
+	ida_free(ida, winid);
 }
 
 static int vas_assign_window_id(struct ida *ida)
 {
-	int rc, winid;
-
-	do {
-		rc = ida_pre_get(ida, GFP_KERNEL);
-		if (!rc)
-			return -EAGAIN;
-
-		spin_lock(&vas_ida_lock);
-		rc = ida_get_new(ida, &winid);
-		spin_unlock(&vas_ida_lock);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
+	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
 
-	if (winid > VAS_WINDOWS_PER_CHIP) {
-		pr_err("Too many (%d) open windows\n", winid);
-		vas_release_window_id(ida, winid);
+	if (winid == -ENOSPC) {
+		pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
 		return -EAGAIN;
 	}

+6 −23
@@ -118,7 +118,6 @@ static struct dentry *dfs_device_status;

 static u32 cpu_use[NR_CPUS];
 
-static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
 
 static int mtip_block_initialize(struct driver_data *dd);
@@ -3767,20 +3766,10 @@ static int mtip_block_initialize(struct driver_data *dd)
 		goto alloc_disk_error;
 	}
 
-	/* Generate the disk name, implemented same as in sd.c */
-	do {
-		if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
-			rv = -ENOMEM;
-			goto ida_get_error;
-		}
-
-		spin_lock(&rssd_index_lock);
-		rv = ida_get_new(&rssd_index_ida, &index);
-		spin_unlock(&rssd_index_lock);
-	} while (rv == -EAGAIN);
-
-	if (rv)
+	rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
+	if (rv < 0)
 		goto ida_get_error;
+	index = rv;
 
 	rv = rssd_disk_name_format("rssd",
 				index,
@@ -3922,9 +3911,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 block_queue_alloc_tag_error:
 	mtip_hw_debugfs_exit(dd);
 disk_index_error:
-	spin_lock(&rssd_index_lock);
-	ida_remove(&rssd_index_ida, index);
-	spin_unlock(&rssd_index_lock);
+	ida_free(&rssd_index_ida, index);
 
 ida_get_error:
 	put_disk(dd->disk);
@@ -4012,9 +3999,7 @@ static int mtip_block_remove(struct driver_data *dd)
 	}
 	dd->disk  = NULL;
 
-	spin_lock(&rssd_index_lock);
-	ida_remove(&rssd_index_ida, dd->index);
-	spin_unlock(&rssd_index_lock);
+	ida_free(&rssd_index_ida, dd->index);
 
 	/* De-initialize the protocol layer. */
 	mtip_hw_exit(dd);
@@ -4054,9 +4039,7 @@ static int mtip_block_shutdown(struct driver_data *dd)
 		dd->queue = NULL;
 	}
 
-	spin_lock(&rssd_index_lock);
-	ida_remove(&rssd_index_ida, dd->index);
-	spin_unlock(&rssd_index_lock);
+	ida_free(&rssd_index_ida, dd->index);
 	return 0;
 }

+5 −16
@@ -58,7 +58,6 @@ MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
			     "until the card startup has completed.");

static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);

/* --------------------Debugfs Setup ------------------- */

@@ -771,19 +770,10 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 	card->dev = dev;
 	pci_set_drvdata(dev, card);
 
-	do {
-		if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
-			st = -ENOMEM;
-			goto failed_ida_get;
-		}
-
-		spin_lock(&rsxx_ida_lock);
-		st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
-		spin_unlock(&rsxx_ida_lock);
-	} while (st == -EAGAIN);
-
-	if (st)
+	st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
+	if (st < 0)
 		goto failed_ida_get;
+	card->disk_id = st;
 
 	st = pci_enable_device(dev);
 	if (st)
@@ -985,9 +975,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 failed_dma_mask:
 	pci_disable_device(dev);
 failed_enable:
-	spin_lock(&rsxx_ida_lock);
-	ida_remove(&rsxx_disk_ida, card->disk_id);
-	spin_unlock(&rsxx_ida_lock);
+	ida_free(&rsxx_disk_ida, card->disk_id);
 failed_ida_get:
 	kfree(card);

@@ -1050,6 +1038,7 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	pci_disable_device(dev);
 	pci_release_regions(dev);
 
+	ida_free(&rsxx_disk_ida, card->disk_id);
 	kfree(card);
 }

+7 −16
@@ -161,9 +161,7 @@ static void chan_dev_release(struct device *dev)

 	chan_dev = container_of(dev, typeof(*chan_dev), device);
 	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		mutex_lock(&dma_list_mutex);
-		ida_remove(&dma_ida, chan_dev->dev_id);
-		mutex_unlock(&dma_list_mutex);
+		ida_free(&dma_ida, chan_dev->dev_id);
 		kfree(chan_dev->idr_ref);
 	}
 	kfree(chan_dev);
@@ -898,17 +896,12 @@ static bool device_has_all_tx_types(struct dma_device *device)

 static int get_dma_id(struct dma_device *device)
 {
-	int rc;
-
-	do {
-		if (!ida_pre_get(&dma_ida, GFP_KERNEL))
-			return -ENOMEM;
-		mutex_lock(&dma_list_mutex);
-		rc = ida_get_new(&dma_ida, &device->dev_id);
-		mutex_unlock(&dma_list_mutex);
-	} while (rc == -EAGAIN);
+	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
 
-	return rc;
+	if (rc < 0)
+		return rc;
+	device->dev_id = rc;
+	return 0;
 }
 
 /**
@@ -1092,9 +1085,7 @@ int dma_async_device_register(struct dma_device *device)
 err_out:
 	/* if we never registered a channel just release the idr */
 	if (atomic_read(idr_ref) == 0) {
-		mutex_lock(&dma_list_mutex);
-		ida_remove(&dma_ida, device->dev_id);
-		mutex_unlock(&dma_list_mutex);
+		ida_free(&dma_ida, device->dev_id);
 		kfree(idr_ref);
 		return rc;
 	}