// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "iommu-sva.h"
struct iopf_queue {
	struct workqueue_struct	*wq;
	struct list_head	devices;
	struct mutex		lock;
};
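
/**
 * struct iopf_device_param - IO Page Fault data attached to a device
 * @dev: the device that owns this param
 * @queue: IOPF queue to which this device is attached
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 */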
struct iopf_device_param {
	struct device		*dev;
	struct iopf_queue	*queue;
	struct list_head	queue_list;
	struct list_head	partial;
};
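
/* A single page request, part of a Page Request Group */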
struct iopf_fault {
	struct iommu_fault	fault;
	struct list_head	list;
};
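
/*
 * All page requests of a Page Request Group. The group is created when the
 * last request arrives, and is handled by a work item on the fault queue.
 */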
struct iopf_group {
	struct iopf_fault	last_fault;
	struct list_head	faults;
	struct work_struct	work;
	struct device		*dev;
};
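
/*
 * Send a response for an entire group: the PASID, group ID and response
 * flags are taken from @iopf, which callers pass as the group's last fault.
 */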
static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
			       enum iommu_page_response_code status)
{
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.pasid		= iopf->fault.prm.pasid,
		.grpid		= iopf->fault.prm.grpid,
		.code		= status,
	};

	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	return iommu_page_response(dev, &resp);
}
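
/*
 * Work item handler: find the domain bound to the faulting PASID and let
 * its fault handler resolve each page request in the group, then send a
 * single response reporting the final status.
 */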
static void iopf_handler(struct work_struct *work)
{
	struct iopf_group *group;
	struct iommu_domain *domain;
	struct iopf_fault *iopf, *next;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	domain = iommu_get_domain_for_dev_pasid(group->dev,
				group->last_fault.fault.prm.pasid, 0);
	if (!domain || !domain->iopf_handler)
		status = IOMMU_PAGE_RESP_INVALID;

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = domain->iopf_handler(&iopf->fault,
						      domain->fault_data);

		/* last_fault is embedded in the group and freed with it */
		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	kfree(group);
}
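
/**
 * iommu_queue_iopf - IO Page Fault handler
 * @fault: fault event
 * @cookie: struct device, passed to iommu_register_device_fault_handler()
 *
 * Add a fault to the device workqueue. Partial faults (those without the
 * last-page flag) are kept on the device's partial list until the last
 * request in the group arrives, at which point the whole group is queued.
 *
 * Return: 0 on success and <0 on error.
 */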
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	int ret;
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct device *dev = cookie;
	struct dev_iommu *param = dev->iommu;

	lockdep_assert_held(&param->lock);

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		/* Not a recoverable page fault */
		return -EOPNOTSUPP;

	/*
	 * As long as we're holding param->lock, the queue can't be unlinked
	 * from the device and therefore cannot disappear.
	 */
	iopf_param = param->iopf_param;
	if (!iopf_param)
		return -ENODEV;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
		if (!iopf)
			return -ENOMEM;

		iopf->fault = *fault;

		/* Non-last request of a group. Postpone until the last one */
		list_add(&iopf->list, &iopf_param->partial);

		return 0;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		/*
		 * The caller will send a response to the hardware. But we do
		 * need to clean up before leaving, otherwise partial faults
		 * would be stuck.
		 */
		ret = -ENOMEM;
		goto cleanup_partial;
	}

	group->dev = dev;
	group->last_fault.fault = *fault;
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	INIT_WORK(&group->work, iopf_handler);

	/* See if we have partial faults for this group */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}

	queue_work(iopf_param->queue->wq, &group->work);
	return 0;

cleanup_partial:
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_queue_iopf);
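
/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled and won't hit the address
 * space of the next process that uses this PASID. The driver must make sure
 * that no new fault is added to the queue, in particular by flushing its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */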
int iopf_queue_flush_dev(struct device *dev)
{
	int ret = 0;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param)
		flush_workqueue(iopf_param->queue->wq);
	else
		ret = -ENODEV;
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
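
/**
 * iopf_queue_discard_partial - Remove all pending partial faults
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have
 * been lost and the IOMMU driver calls this to discard all partial faults.
 * The driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */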
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
					 list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	mutex_unlock(&queue->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
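
/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */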
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EBUSY;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
	if (!iopf_param)
		return -ENOMEM;

	INIT_LIST_HEAD(&iopf_param->partial);
	iopf_param->queue = queue;
	iopf_param->dev = dev;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (!param->iopf_param) {
		list_add(&iopf_param->queue_list, &queue->devices);
		param->iopf_param = iopf_param;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	if (ret)
		kfree(iopf_param);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
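
/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Caller makes sure that no more faults are reported for this device.
 *
 * Return: 0 on success and <0 on error, in particular if the device isn't
 * attached to the queue.
 */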
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EINVAL;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param || !queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param && iopf_param->queue == queue) {
		list_del(&iopf_param->queue_list);
		param->iopf_param = NULL;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
	if (ret)
		return ret;

	/* Just in case some faults are still stuck */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
		kfree(iopf);

	kfree(iopf_param);

	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
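
/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */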
struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/*
	 * The WQ is unordered because the low-level handler enqueues faults by
	 * group. PRI requests within a group have to be ordered, but once
	 * that's dealt with, the high-level function can handle groups out of
	 * order.
	 */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);
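
/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
 * adding/removing devices on this queue anymore.
 */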
void iopf_queue_free(struct iopf_queue *queue)
{
	struct iopf_device_param *iopf_param, *next;

	if (!queue)
		return;

	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
		iopf_queue_remove_device(queue, iopf_param->dev);

	destroy_workqueue(queue->wq);
	kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);