
Commit 524a6404 authored by Oded Gabbay

drm/amdkfd: Do copy_to/from_user in general kfd_ioctl()



This patch moves the copy_to_user() and copy_from_user() calls from the
individual ioctl functions in amdkfd to the general kfd_ioctl() function,
as this is common code for all ioctls.

This was done following the example of drm_ioctl.c.

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 2030664b
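For readers skimming the diff, the dispatch pattern being adopted here (modelled on drm_ioctl.c) is: the dispatcher decodes the ioctl's direction and payload size, copies the user buffer into kernel memory once on the way in, hands each per-command handler a plain kernel pointer, and copies the result back to user space once on the way out. The sketch below is a simplified, hypothetical illustration of that flow, not code from this driver: the names demo_ioctl and demo_handler_t are inventions for the example, error paths are trimmed, and the real patch keeps separate usize/asize values where this sketch uses a single size.

/*
 * Hypothetical sketch of the drm_ioctl.c-style dispatch flow that this
 * patch moves kfd_ioctl() towards. Names (demo_ioctl, demo_handler_t)
 * are illustrative only; the usize/asize distinction of the real code
 * is simplified to one size here.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* a per-command handler only ever sees a kernel-space copy of the args */
typedef int (*demo_handler_t)(struct file *filep, void *data);

static long demo_ioctl(struct file *filep, unsigned int cmd,
		       unsigned long arg, demo_handler_t handler)
{
	char stack_kdata[128];			/* small payloads stay on the stack */
	char *kdata = stack_kdata;
	unsigned int size = _IOC_SIZE(cmd);	/* payload size encoded in the cmd */
	int retcode;

	if (size > sizeof(stack_kdata)) {
		kdata = kmalloc(size, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (cmd & IOC_IN) {
		/* single copy_from_user() for every command with input */
		if (copy_from_user(kdata, (void __user *)arg, size)) {
			retcode = -EFAULT;
			goto out;
		}
	} else if (cmd & IOC_OUT) {
		/* output-only commands start from a zeroed buffer */
		memset(kdata, 0, size);
	}

	retcode = handler(filep, kdata);

	/* single copy_to_user() for every command with output */
	if ((cmd & IOC_OUT) &&
	    copy_to_user((void __user *)arg, kdata, size))
		retcode = -EFAULT;

out:
	if (kdata != stack_kdata)
		kfree(kdata);
	return retcode;
}

Centralising the copies in this way is what lets every handler signature in the hunks below change from taking a void __user *arg to a plain void *data that has already been copied in from user space.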
+117 −117
@@ -126,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
 	return 0;
 }
 
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
-					void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+					void *data)
 {
-	struct kfd_ioctl_get_version_args args;
+	struct kfd_ioctl_get_version_args *args = data;
 	int err = 0;
 
-	args.major_version = KFD_IOCTL_MAJOR_VERSION;
-	args.minor_version = KFD_IOCTL_MINOR_VERSION;
+	args->major_version = KFD_IOCTL_MAJOR_VERSION;
+	args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
-	if (copy_to_user(arg, &args, sizeof(args)))
-		err = -EFAULT;
-
 	return err;
 }
@@ -220,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	return 0;
 }
 
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
-					void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+					void *data)
 {
-	struct kfd_ioctl_create_queue_args args;
+	struct kfd_ioctl_create_queue_args *args = data;
 	struct kfd_dev *dev;
 	int err = 0;
 	unsigned int queue_id;
@@ -232,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
 	memset(&q_properties, 0, sizeof(struct queue_properties));
 
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
-
 	pr_debug("kfd: creating queue ioctl\n");
 
-	err = set_queue_properties_from_user(&q_properties, &args);
+	err = set_queue_properties_from_user(&q_properties, args);
 	if (err)
 		return err;
 
-	dev = kfd_device_by_id(args.gpu_id);
+	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL)
 		return -EINVAL;
 
@@ -249,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
 	pdd = kfd_bind_process_to_device(dev, p);
 	if (IS_ERR(pdd)) {
-		err = PTR_ERR(pdd);
+		err = -ESRCH;
 		goto err_bind_process;
 	}
 
@@ -262,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	if (err != 0)
 		goto err_create_queue;
 
-	args.queue_id = queue_id;
+	args->queue_id = queue_id;
 
 	/* Return gpu_id as doorbell offset for mmap usage */
-	args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
-	if (copy_to_user(arg, &args, sizeof(args))) {
-		err = -EFAULT;
-		goto err_copy_args_out;
-	}
+	args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
 
 	mutex_unlock(&p->mutex);
 
-	pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
 
 	pr_debug("ring buffer address == 0x%016llX\n",
-			args.ring_base_address);
+			args->ring_base_address);
 
 	pr_debug("read ptr address    == 0x%016llX\n",
-			args.read_pointer_address);
+			args->read_pointer_address);
 
 	pr_debug("write ptr address   == 0x%016llX\n",
-			args.write_pointer_address);
+			args->write_pointer_address);
 
 	return 0;
 
-err_copy_args_out:
-	pqm_destroy_queue(&p->pqm, queue_id);
 err_create_queue:
 err_bind_process:
 	mutex_unlock(&p->mutex);
@@ -296,99 +283,90 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 }
 
 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
-					void __user *arg)
+					void *data)
 {
 	int retval;
-	struct kfd_ioctl_destroy_queue_args args;
-
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
+	struct kfd_ioctl_destroy_queue_args *args = data;
 
 	pr_debug("kfd: destroying queue id %d for PASID %d\n",
-				args.queue_id,
+				args->queue_id,
 				p->pasid);
 
 	mutex_lock(&p->mutex);
 
-	retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 
 	mutex_unlock(&p->mutex);
 	return retval;
 }
 
 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
-					void __user *arg)
+					void *data)
 {
 	int retval;
-	struct kfd_ioctl_update_queue_args args;
+	struct kfd_ioctl_update_queue_args *args = data;
 	struct queue_properties properties;
 
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
-
-	if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
 		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 		return -EINVAL;
 	}
 
-	if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
 		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 		return -EINVAL;
 	}
 
-	if ((args.ring_base_address) &&
+	if ((args->ring_base_address) &&
 		(!access_ok(VERIFY_WRITE,
-			(const void __user *) args.ring_base_address,
+			(const void __user *) args->ring_base_address,
 			sizeof(uint64_t)))) {
 		pr_err("kfd: can't access ring base address\n");
 		return -EFAULT;
 	}
 
-	if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
 		pr_err("kfd: ring size must be a power of 2 or 0\n");
 		return -EINVAL;
 	}
 
-	properties.queue_address = args.ring_base_address;
-	properties.queue_size = args.ring_size;
-	properties.queue_percent = args.queue_percentage;
-	properties.priority = args.queue_priority;
+	properties.queue_address = args->ring_base_address;
+	properties.queue_size = args->ring_size;
+	properties.queue_percent = args->queue_percentage;
+	properties.priority = args->queue_priority;
 
 	pr_debug("kfd: updating queue id %d for PASID %d\n",
-			args.queue_id, p->pasid);
+			args->queue_id, p->pasid);
 
 	mutex_lock(&p->mutex);
 
-	retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
 
 	mutex_unlock(&p->mutex);
 
 	return retval;
 }
 
-static long kfd_ioctl_set_memory_policy(struct file *filep,
-				struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+					struct kfd_process *p, void *data)
 {
-	struct kfd_ioctl_set_memory_policy_args args;
+	struct kfd_ioctl_set_memory_policy_args *args = data;
 	struct kfd_dev *dev;
 	int err = 0;
 	struct kfd_process_device *pdd;
 	enum cache_policy default_policy, alternate_policy;
 
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
-
-	if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
-	    && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 		return -EINVAL;
 	}
 
-	if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
-	    && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 		return -EINVAL;
 	}
 
-	dev = kfd_device_by_id(args.gpu_id);
+	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL)
 		return -EINVAL;
 
@@ -396,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 
 	pdd = kfd_bind_process_to_device(dev, p);
 	if (IS_ERR(pdd)) {
-		err = PTR_ERR(pdd);
+		err = -ESRCH;
 		goto out;
 	}
 
-	default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 			 ? cache_policy_coherent : cache_policy_noncoherent;
 
 	alternate_policy =
-		(args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 		   ? cache_policy_coherent : cache_policy_noncoherent;
 
 	if (!dev->dqm->set_cache_memory_policy(dev->dqm,
 				&pdd->qpd,
 				default_policy,
 				alternate_policy,
-				(void __user *)args.alternate_aperture_base,
-				args.alternate_aperture_size))
+				(void __user *)args->alternate_aperture_base,
+				args->alternate_aperture_size))
 		err = -EINVAL;
 
 out:
@@ -421,53 +399,44 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 	return err;
 }
 
-static long kfd_ioctl_get_clock_counters(struct file *filep,
-				struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+				struct kfd_process *p, void *data)
 {
-	struct kfd_ioctl_get_clock_counters_args args;
+	struct kfd_ioctl_get_clock_counters_args *args = data;
 	struct kfd_dev *dev;
 	struct timespec time;
 
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
-
-	dev = kfd_device_by_id(args.gpu_id);
+	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL)
 		return -EINVAL;
 
 	/* Reading GPU clock counter from KGD */
-	args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+	args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
 	/* No access to rdtsc. Using raw monotonic time */
 	getrawmonotonic(&time);
-	args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+	args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
 
 	get_monotonic_boottime(&time);
-	args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+	args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
 
 	/* Since the counter is in nano-seconds we use 1GHz frequency */
-	args.system_clock_freq = 1000000000;
-
-	if (copy_to_user(arg, &args, sizeof(args)))
-		return -EFAULT;
+	args->system_clock_freq = 1000000000;
 
 	return 0;
 }
 
 
 static int kfd_ioctl_get_process_apertures(struct file *filp,
-				struct kfd_process *p, void __user *arg)
+				struct kfd_process *p, void *data)
 {
-	struct kfd_ioctl_get_process_apertures_args args;
+	struct kfd_ioctl_get_process_apertures_args *args = data;
 	struct kfd_process_device_apertures *pAperture;
 	struct kfd_process_device *pdd;
 
 	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
 
-	if (copy_from_user(&args, arg, sizeof(args)))
-		return -EFAULT;
-
-	args.num_of_nodes = 0;
+	args->num_of_nodes = 0;
 
 	mutex_lock(&p->mutex);
 
@@ -476,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 		/* Run over all pdd of the process */
 		pdd = kfd_get_first_process_device_data(p);
 		do {
-			pAperture = &args.process_apertures[args.num_of_nodes];
+			pAperture =
+				&args->process_apertures[args->num_of_nodes];
 			pAperture->gpu_id = pdd->dev->id;
 			pAperture->lds_base = pdd->lds_base;
 			pAperture->lds_limit = pdd->lds_limit;
@@ -486,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 			pAperture->scratch_limit = pdd->scratch_limit;
 
 			dev_dbg(kfd_device,
-				"node id %u\n", args.num_of_nodes);
+				"node id %u\n", args->num_of_nodes);
 			dev_dbg(kfd_device,
 				"gpu id %u\n", pdd->dev->id);
 			dev_dbg(kfd_device,
@@ -502,23 +472,23 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 			dev_dbg(kfd_device,
 				"scratch_limit %llX\n", pdd->scratch_limit);
 
-			args.num_of_nodes++;
+			args->num_of_nodes++;
 		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
-				(args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+				(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 	}
 
 	mutex_unlock(&p->mutex);
 
-	if (copy_to_user(arg, &args, sizeof(args)))
-		return -EFAULT;
-
 	return 0;
 }
 
 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
 	struct kfd_process *process;
-	long err = -EINVAL;
+	char stack_kdata[128];
+	char *kdata = NULL;
+	unsigned int usize, asize;
+	int retcode = -EINVAL;
 
 	dev_dbg(kfd_device,
 		"ioctl cmd 0x%x (#%d), arg 0x%lx\n",
@@ -528,54 +498,84 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	if (IS_ERR(process))
 		return PTR_ERR(process);
 
+	if (cmd & (IOC_IN | IOC_OUT)) {
+		if (asize <= sizeof(stack_kdata)) {
+			kdata = stack_kdata;
+		} else {
+			kdata = kmalloc(asize, GFP_KERNEL);
+			if (!kdata) {
+				retcode = -ENOMEM;
+				goto err_i1;
+			}
+		}
+		if (asize > usize)
+			memset(kdata + usize, 0, asize - usize);
+	}
+
+	if (cmd & IOC_IN) {
+		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+			retcode = -EFAULT;
+			goto err_i1;
+		}
+	} else if (cmd & IOC_OUT) {
+		memset(kdata, 0, usize);
+	}
+
 	switch (cmd) {
 	case KFD_IOC_GET_VERSION:
-		err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
+		retcode = kfd_ioctl_get_version(filep, process, kdata);
 		break;
 	case KFD_IOC_CREATE_QUEUE:
-		err = kfd_ioctl_create_queue(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_create_queue(filep, process,
+						kdata);
 		break;
 
 	case KFD_IOC_DESTROY_QUEUE:
-		err = kfd_ioctl_destroy_queue(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_destroy_queue(filep, process,
						kdata);
 		break;
 
 	case KFD_IOC_SET_MEMORY_POLICY:
-		err = kfd_ioctl_set_memory_policy(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_set_memory_policy(filep, process,
						kdata);
 		break;
 
 	case KFD_IOC_GET_CLOCK_COUNTERS:
-		err = kfd_ioctl_get_clock_counters(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_get_clock_counters(filep, process,
						kdata);
 		break;
 
 	case KFD_IOC_GET_PROCESS_APERTURES:
-		err = kfd_ioctl_get_process_apertures(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_get_process_apertures(filep, process,
						kdata);
 		break;
 
 	case KFD_IOC_UPDATE_QUEUE:
-		err = kfd_ioctl_update_queue(filep, process,
-						(void __user *)arg);
+		retcode = kfd_ioctl_update_queue(filep, process,
						kdata);
 		break;
 
 	default:
-		dev_err(kfd_device,
+		dev_dbg(kfd_device,
 			"unknown ioctl cmd 0x%x, arg 0x%lx)\n",
 			cmd, arg);
-		err = -EINVAL;
+		retcode = -EINVAL;
 		break;
 	}
 
-	if (err < 0)
-		dev_err(kfd_device,
-			"ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
-			err, cmd, _IOC_NR(cmd));
+	if (cmd & IOC_OUT)
+		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+			retcode = -EFAULT;
 
-	return err;
+err_i1:
+	if (kdata != stack_kdata)
+		kfree(kdata);
+
+	if (retcode)
+		dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+	return retcode;
 }
 
 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)