Commit 5ef3166e authored by Frederic Barrat, committed by Michael Ellerman

ocxl: Driver code for 'generic' opencapi devices

Add an ocxl driver to handle generic opencapi devices. Of course, it's
not meant to be the only opencapi driver; any device is free to
implement its own. But if a host application only needs basic services,
like attaching to an opencapi adapter, having translation faults handled,
or allocating AFU interrupts, it should suffice.

The AFU config space must follow the opencapi specification and use
the expected vendor/device ID to be seen by the generic driver.

The driver exposes each AFU of the device as a char device in /dev/ocxl/.

Note that the driver currently doesn't handle memory attached to the
opencapi device.
Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 2cb3d64b
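For orientation, here is a minimal userspace sketch of the intended flow, using only the uapi introduced at the end of this commit: open the AFU char device, attach the current process, then mmap the per-process MMIO area. This is a sketch under assumptions, not part of the patch: the device name and mapping length are placeholders (real names depend on the AFU, and the length should match the AFU's pp_mmio_stride), and the include path assumes installed kernel uapi headers.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <misc/ocxl.h>	/* uapi header added by this commit */

int main(void)
{
	struct ocxl_ioctl_attach attach;
	void *pp_mmio;
	int fd;

	fd = open("/dev/ocxl/afu-example", O_RDWR); /* placeholder name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* reserved fields must be zero, or the attach returns -EINVAL */
	memset(&attach, 0, sizeof(attach));
	if (ioctl(fd, OCXL_IOCTL_ATTACH, &attach) < 0) {
		perror("OCXL_IOCTL_ATTACH");
		return 1;
	}
	/* per-process MMIO; faults only resolve once the context is attached */
	pp_mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (pp_mmio == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* ... program the AFU through its per-process registers ... */
	munmap(pp_mmio, 4096);
	close(fd);
	return 0;
}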
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "ocxl_internal.h"
struct ocxl_context *ocxl_context_alloc(void)
{
return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}
int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
struct address_space *mapping)
{
int pasid;
ctx->afu = afu;
mutex_lock(&afu->contexts_lock);
pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
afu->pasid_base + afu->pasid_max, GFP_KERNEL);
if (pasid < 0) {
mutex_unlock(&afu->contexts_lock);
return pasid;
}
afu->pasid_count++;
mutex_unlock(&afu->contexts_lock);
ctx->pasid = pasid;
ctx->status = OPENED;
mutex_init(&ctx->status_mutex);
ctx->mapping = mapping;
mutex_init(&ctx->mapping_lock);
init_waitqueue_head(&ctx->events_wq);
mutex_init(&ctx->xsl_error_lock);
/*
 * Keep a reference on the AFU to make sure it remains valid
 * for the lifetime of the context
 */
ocxl_afu_get(afu);
return 0;
}
/*
* Callback for when a translation fault triggers an error
* data: a pointer to the context which triggered the fault
* addr: the address that triggered the error
* dsisr: the value of the PPC64 dsisr register
*/
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
struct ocxl_context *ctx = (struct ocxl_context *) data;
mutex_lock(&ctx->xsl_error_lock);
ctx->xsl_error.addr = addr;
ctx->xsl_error.dsisr = dsisr;
ctx->xsl_error.count++;
mutex_unlock(&ctx->xsl_error_lock);
wake_up_all(&ctx->events_wq);
}
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
int rc;
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
rc = -EIO;
goto out;
}
rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
current->mm->context.id, 0, amr, current->mm,
xsl_fault_error, ctx);
if (rc)
goto out;
ctx->status = ATTACHED;
out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 pp_mmio_addr;
int pasid_off;
if (offset >= ctx->afu->config.pp_mmio_stride)
return VM_FAULT_SIGBUS;
mutex_lock(&ctx->status_mutex);
if (ctx->status != ATTACHED) {
mutex_unlock(&ctx->status_mutex);
pr_debug("%s: Context not attached, failing mmio mmap\n",
__func__);
return VM_FAULT_SIGBUS;
}
pasid_off = ctx->pasid - ctx->afu->pasid_base;
pp_mmio_addr = ctx->afu->pp_mmio_start +
pasid_off * ctx->afu->config.pp_mmio_stride +
offset;
vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
return VM_FAULT_NOPAGE;
}
static int ocxl_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_context *ctx = vma->vm_file->private_data;
u64 offset;
int rc;
offset = vmf->pgoff << PAGE_SHIFT;
pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
ctx->pasid, vmf->address, offset);
rc = map_pp_mmio(vma, vmf->address, offset, ctx);
return rc;
}
static const struct vm_operations_struct ocxl_vmops = {
.fault = ocxl_mmap_fault,
};
static int check_mmap_mmio(struct ocxl_context *ctx,
struct vm_area_struct *vma)
{
if ((vma_pages(vma) + vma->vm_pgoff) >
(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
return -EINVAL;
return 0;
}
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
int rc;
rc = check_mmap_mmio(ctx, vma);
if (rc)
return rc;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxl_vmops;
return 0;
}
int ocxl_context_detach(struct ocxl_context *ctx)
{
struct pci_dev *dev;
int afu_control_pos;
enum ocxl_context_status status;
int rc;
mutex_lock(&ctx->status_mutex);
status = ctx->status;
ctx->status = CLOSED;
mutex_unlock(&ctx->status_mutex);
if (status != ATTACHED)
return 0;
dev = to_pci_dev(ctx->afu->fn->dev.parent);
afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;
mutex_lock(&ctx->afu->afu_control_lock);
rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
mutex_unlock(&ctx->afu->afu_control_lock);
if (rc) {
/*
 * If we time out waiting for the AFU to terminate the
 * pasid, then it's dangerous to clean up the Process
 * Element entry in the SPA, as it may still be referenced
 * by the AFU in the future. In that case, we would
 * checkstop because of an invalid PE access (FIR
 * register 2, bit 42). So leave the PE defined. The
 * caller shouldn't free the context either, so that the
 * PASID remains allocated.
 *
 * A link reset will be required to clean up the AFU
 * and the SPA.
 */
if (rc == -EBUSY)
return rc;
}
rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
if (rc) {
dev_warn(&ctx->afu->dev,
"Couldn't remove PE entry cleanly: %d\n", rc);
}
return 0;
}
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
struct ocxl_context *ctx;
int tmp;
mutex_lock(&afu->contexts_lock);
idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
ocxl_context_detach(ctx);
/*
* We are force detaching - remove any active mmio
* mappings so userspace cannot interfere with the
* card if it comes back. Easiest way to exercise
* this is to unbind and rebind the driver via sysfs
* while it is in use.
*/
mutex_lock(&ctx->mapping_lock);
if (ctx->mapping)
unmap_mapping_range(ctx->mapping, 0, 0, 1);
mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
void ocxl_context_free(struct ocxl_context *ctx)
{
mutex_lock(&ctx->afu->contexts_lock);
ctx->afu->pasid_count--;
idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
mutex_unlock(&ctx->afu->contexts_lock);
/* reference to the AFU taken in ocxl_context_init */
ocxl_afu_put(ctx->afu);
kfree(ctx);
}
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <uapi/misc/ocxl.h>
#include "ocxl_internal.h"
#define OCXL_NUM_MINORS 256 /* Total to reserve */
static dev_t ocxl_dev;
static struct class *ocxl_class;
static struct mutex minors_idr_lock;
static struct idr minors_idr;
static struct ocxl_afu *find_and_get_afu(dev_t devno)
{
struct ocxl_afu *afu;
int afu_minor;
afu_minor = MINOR(devno);
/*
* We don't declare an RCU critical section here, as our AFU
* is protected by a reference counter on the device. By the time the
* minor number of a device is removed from the idr, the ref count of
* the device is already at 0, so no user API will access that AFU and
* this function can't return it.
*/
afu = idr_find(&minors_idr, afu_minor);
if (afu)
ocxl_afu_get(afu);
return afu;
}
static int allocate_afu_minor(struct ocxl_afu *afu)
{
int minor;
mutex_lock(&minors_idr_lock);
minor = idr_alloc(&minors_idr, afu, 0, OCXL_NUM_MINORS, GFP_KERNEL);
mutex_unlock(&minors_idr_lock);
return minor;
}
static void free_afu_minor(struct ocxl_afu *afu)
{
mutex_lock(&minors_idr_lock);
idr_remove(&minors_idr, MINOR(afu->dev.devt));
mutex_unlock(&minors_idr_lock);
}
static int afu_open(struct inode *inode, struct file *file)
{
struct ocxl_afu *afu;
struct ocxl_context *ctx;
int rc;
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
afu = find_and_get_afu(inode->i_rdev);
if (!afu)
return -ENODEV;
ctx = ocxl_context_alloc();
if (!ctx) {
rc = -ENOMEM;
goto put_afu;
}
rc = ocxl_context_init(ctx, afu, inode->i_mapping);
if (rc)
goto put_afu;
file->private_data = ctx;
ocxl_afu_put(afu);
return 0;
put_afu:
ocxl_afu_put(afu);
return rc;
}
static long afu_ioctl_attach(struct ocxl_context *ctx,
struct ocxl_ioctl_attach __user *uarg)
{
struct ocxl_ioctl_attach arg;
u64 amr = 0;
int rc;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
/* Make sure reserved fields are not set for forward compatibility */
if (arg.reserved1 || arg.reserved2 || arg.reserved3)
return -EINVAL;
amr = arg.amr & mfspr(SPRN_UAMOR);
rc = ocxl_context_attach(ctx, amr);
return rc;
}
#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \
"UNKNOWN")
static long afu_ioctl(struct file *file, unsigned int cmd,
unsigned long args)
{
struct ocxl_context *ctx = file->private_data;
long rc;
pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid,
CMD_STR(cmd));
if (ctx->status == CLOSED)
return -EIO;
switch (cmd) {
case OCXL_IOCTL_ATTACH:
rc = afu_ioctl_attach(ctx,
(struct ocxl_ioctl_attach __user *) args);
break;
default:
rc = -EINVAL;
}
return rc;
}
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long args)
{
return afu_ioctl(file, cmd, args);
}
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
struct ocxl_context *ctx = file->private_data;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
return ocxl_context_mmap(ctx, vma);
}
static bool has_xsl_error(struct ocxl_context *ctx)
{
bool ret;
mutex_lock(&ctx->xsl_error_lock);
ret = !!ctx->xsl_error.addr;
mutex_unlock(&ctx->xsl_error_lock);
return ret;
}
/*
 * Are there any events pending on the AFU?
 * ctx: The AFU context
 * Returns: true if there are events pending
 */
static bool afu_events_pending(struct ocxl_context *ctx)
{
if (has_xsl_error(ctx))
return true;
return false;
}
static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait)
{
struct ocxl_context *ctx = file->private_data;
unsigned int mask = 0;
bool closed;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
poll_wait(file, &ctx->events_wq, wait);
mutex_lock(&ctx->status_mutex);
closed = (ctx->status == CLOSED);
mutex_unlock(&ctx->status_mutex);
if (afu_events_pending(ctx))
mask = POLLIN | POLLRDNORM;
else if (closed)
mask = POLLERR;
return mask;
}
/*
* Populate the supplied buffer with a single XSL error
* ctx: The AFU context to report the error from
* header: the event header to populate
* buf: The buffer to write the body into (should be at least
* AFU_EVENT_BODY_XSL_ERROR_SIZE)
* Return: the amount of buffer that was populated
*/
static ssize_t append_xsl_error(struct ocxl_context *ctx,
struct ocxl_kernel_event_header *header,
char __user *buf)
{
struct ocxl_kernel_event_xsl_fault_error body;
memset(&body, 0, sizeof(body));
mutex_lock(&ctx->xsl_error_lock);
if (!ctx->xsl_error.addr) {
mutex_unlock(&ctx->xsl_error_lock);
return 0;
}
body.addr = ctx->xsl_error.addr;
body.dsisr = ctx->xsl_error.dsisr;
body.count = ctx->xsl_error.count;
ctx->xsl_error.addr = 0;
ctx->xsl_error.dsisr = 0;
ctx->xsl_error.count = 0;
mutex_unlock(&ctx->xsl_error_lock);
header->type = OCXL_AFU_EVENT_XSL_FAULT_ERROR;
if (copy_to_user(buf, &body, sizeof(body)))
return -EFAULT;
return sizeof(body);
}
#define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error)
/*
* Reports events on the AFU
* Format:
* Header (struct ocxl_kernel_event_header)
* Body (struct ocxl_kernel_event_*)
* Header...
*/
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct ocxl_context *ctx = file->private_data;
struct ocxl_kernel_event_header header;
ssize_t rc;
ssize_t used = 0; /* must be signed: append_xsl_error() may return -EFAULT */
DEFINE_WAIT(event_wait);
memset(&header, 0, sizeof(header));
/* Require offset to be 0 */
if (*off != 0)
return -EINVAL;
if (count < (sizeof(struct ocxl_kernel_event_header) +
AFU_EVENT_BODY_MAX_SIZE))
return -EINVAL;
for (;;) {
prepare_to_wait(&ctx->events_wq, &event_wait,
TASK_INTERRUPTIBLE);
if (afu_events_pending(ctx))
break;
if (ctx->status == CLOSED)
break;
if (file->f_flags & O_NONBLOCK) {
finish_wait(&ctx->events_wq, &event_wait);
return -EAGAIN;
}
if (signal_pending(current)) {
finish_wait(&ctx->events_wq, &event_wait);
return -ERESTARTSYS;
}
schedule();
}
finish_wait(&ctx->events_wq, &event_wait);
if (has_xsl_error(ctx)) {
used = append_xsl_error(ctx, &header, buf + sizeof(header));
if (used < 0)
return used;
}
if (!afu_events_pending(ctx))
header.flags |= OCXL_KERNEL_EVENT_FLAG_LAST;
if (copy_to_user(buf, &header, sizeof(header)))
return -EFAULT;
used += sizeof(header);
rc = (ssize_t) used;
return rc;
}
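As a companion to the event format documented above afu_read(), here is a hedged userspace sketch, not part of the patch, that drains pending events after poll() reports POLLIN. It assumes fd is an open /dev/ocxl descriptor and relies only on the uapi structures defined at the end of this commit (plus <stdio.h> and <unistd.h>).

static int drain_afu_events(int fd)
{
	char buf[sizeof(struct ocxl_kernel_event_header) +
		sizeof(struct ocxl_kernel_event_xsl_fault_error)];
	struct ocxl_kernel_event_header *header =
		(struct ocxl_kernel_event_header *) buf;

	do {
		/* each read returns one header, plus a body if the type has one */
		if (read(fd, buf, sizeof(buf)) < 0)
			return -1;
		if (header->type == OCXL_AFU_EVENT_XSL_FAULT_ERROR) {
			struct ocxl_kernel_event_xsl_fault_error *xsl =
				(void *)(buf + sizeof(*header));

			fprintf(stderr,
				"XSL fault @ 0x%llx, dsisr 0x%llx, count %llu\n",
				(unsigned long long) xsl->addr,
				(unsigned long long) xsl->dsisr,
				(unsigned long long) xsl->count);
		}
		/* the kernel sets FLAG_LAST when no more events are pending */
	} while (!(header->flags & OCXL_KERNEL_EVENT_FLAG_LAST));
	return 0;
}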
static int afu_release(struct inode *inode, struct file *file)
{
struct ocxl_context *ctx = file->private_data;
int rc;
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
rc = ocxl_context_detach(ctx);
mutex_lock(&ctx->mapping_lock);
ctx->mapping = NULL;
mutex_unlock(&ctx->mapping_lock);
wake_up_all(&ctx->events_wq);
if (rc != -EBUSY)
ocxl_context_free(ctx);
return 0;
}
static const struct file_operations ocxl_afu_fops = {
.owner = THIS_MODULE,
.open = afu_open,
.unlocked_ioctl = afu_ioctl,
.compat_ioctl = afu_compat_ioctl,
.mmap = afu_mmap,
.poll = afu_poll,
.read = afu_read,
.release = afu_release,
};
int ocxl_create_cdev(struct ocxl_afu *afu)
{
int rc;
cdev_init(&afu->cdev, &ocxl_afu_fops);
rc = cdev_add(&afu->cdev, afu->dev.devt, 1);
if (rc) {
dev_err(&afu->dev, "Unable to add afu char device: %d\n", rc);
return rc;
}
return 0;
}
void ocxl_destroy_cdev(struct ocxl_afu *afu)
{
cdev_del(&afu->cdev);
}
int ocxl_register_afu(struct ocxl_afu *afu)
{
int minor;
minor = allocate_afu_minor(afu);
if (minor < 0)
return minor;
afu->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
afu->dev.class = ocxl_class;
return device_register(&afu->dev);
}
void ocxl_unregister_afu(struct ocxl_afu *afu)
{
free_afu_minor(afu);
}
static char *ocxl_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev));
}
int ocxl_file_init(void)
{
int rc;
mutex_init(&minors_idr_lock);
idr_init(&minors_idr);
rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl");
if (rc) {
pr_err("Unable to allocate ocxl major number: %d\n", rc);
return rc;
}
ocxl_class = class_create(THIS_MODULE, "ocxl");
if (IS_ERR(ocxl_class)) {
pr_err("Unable to create ocxl class\n");
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
return PTR_ERR(ocxl_class);
}
ocxl_class->devnode = ocxl_devnode;
return 0;
}
void ocxl_file_exit(void)
{
class_destroy(ocxl_class);
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
idr_destroy(&minors_idr);
}
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/module.h>
#include <linux/pci.h>
#include "ocxl_internal.h"
static int __init init_ocxl(void)
{
int rc = 0;
rc = ocxl_file_init();
if (rc)
return rc;
rc = pci_register_driver(&ocxl_pci_driver);
if (rc) {
ocxl_file_exit();
return rc;
}
return 0;
}
static void exit_ocxl(void)
{
pci_unregister_driver(&ocxl_pci_driver);
ocxl_file_exit();
}
module_init(init_ocxl);
module_exit(exit_ocxl);
MODULE_DESCRIPTION("Open Coherent Accelerator");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#ifndef _OCXL_INTERNAL_H_
#define _OCXL_INTERNAL_H_
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/list.h>
#define OCXL_AFU_NAME_SZ (24+1) /* add 1 for NULL termination */
#define MAX_IRQ_PER_LINK 2000
#define MAX_IRQ_PER_CONTEXT MAX_IRQ_PER_LINK
#define to_ocxl_function(d) container_of(d, struct ocxl_fn, dev)
#define to_ocxl_afu(d) container_of(d, struct ocxl_afu, dev)
extern struct pci_driver ocxl_pci_driver;
/*
* The following 2 structures are a fairly generic way of representing
* the configuration data for a function and AFU, as read from the
* configuration space.
*/
struct ocxl_afu_config {
u8 idx;
int dvsec_afu_control_pos;
char name[OCXL_AFU_NAME_SZ];
u8 version_major;
u8 version_minor;
u8 afuc_type;
u8 afum_type;
u8 profile;
u8 global_mmio_bar;
u64 global_mmio_offset;
u32 global_mmio_size;
u8 pp_mmio_bar;
u64 pp_mmio_offset;
u32 pp_mmio_stride;
u8 log_mem_size;
u8 pasid_supported_log;
u16 actag_supported;
};
struct ocxl_fn_config {
int dvsec_tl_pos;
int dvsec_function_pos;
int dvsec_afu_info_pos;
s8 max_pasid_log;
s8 max_afu_index;
};
struct ocxl_fn {
struct device dev;
int bar_used[3];
struct ocxl_fn_config config;
struct list_head afu_list;
int pasid_base;
int actag_base;
int actag_enabled;
int actag_supported;
struct list_head pasid_list;
struct list_head actag_list;
void *link;
};
struct ocxl_afu {
struct ocxl_fn *fn;
struct list_head list;
struct device dev;
struct cdev cdev;
struct ocxl_afu_config config;
int pasid_base;
int pasid_count; /* opened contexts */
int pasid_max; /* maximum number of contexts */
int actag_base;
int actag_enabled;
struct mutex contexts_lock;
struct idr contexts_idr;
struct mutex afu_control_lock;
u64 global_mmio_start;
u64 irq_base_offset;
void __iomem *global_mmio_ptr;
u64 pp_mmio_start;
struct bin_attribute attr_global_mmio;
};
enum ocxl_context_status {
CLOSED,
OPENED,
ATTACHED,
};
// Contains metadata about a translation fault
struct ocxl_xsl_error {
u64 addr; // The address that triggered the fault
u64 dsisr; // The value of the dsisr register
u64 count; // The number of times this fault has been triggered
};
struct ocxl_context {
struct ocxl_afu *afu;
int pasid;
struct mutex status_mutex;
enum ocxl_context_status status;
struct address_space *mapping;
struct mutex mapping_lock;
wait_queue_head_t events_wq;
struct mutex xsl_error_lock;
struct ocxl_xsl_error xsl_error;
struct mutex irq_lock;
struct idr irq_idr;
};
struct ocxl_process_element {
__be64 config_state;
__be32 reserved1[11];
__be32 lpid;
__be32 tid;
__be32 pid;
__be32 reserved2[10];
__be64 amr;
__be32 reserved3[3];
__be32 software_state;
};
extern struct ocxl_afu *ocxl_afu_get(struct ocxl_afu *afu);
extern void ocxl_afu_put(struct ocxl_afu *afu);
extern int ocxl_create_cdev(struct ocxl_afu *afu);
extern void ocxl_destroy_cdev(struct ocxl_afu *afu);
extern int ocxl_register_afu(struct ocxl_afu *afu);
extern void ocxl_unregister_afu(struct ocxl_afu *afu);
extern int ocxl_file_init(void);
extern void ocxl_file_exit(void);
extern int ocxl_config_read_function(struct pci_dev *dev,
struct ocxl_fn_config *fn);
extern int ocxl_config_check_afu_index(struct pci_dev *dev,
struct ocxl_fn_config *fn, int afu_idx);
extern int ocxl_config_read_afu(struct pci_dev *dev,
struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu,
u8 afu_idx);
extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
extern void ocxl_config_set_afu_pasid(struct pci_dev *dev,
int afu_control,
int pasid_base, u32 pasid_count_log);
extern int ocxl_config_get_actag_info(struct pci_dev *dev,
u16 *base, u16 *enabled, u16 *supported);
extern void ocxl_config_set_actag(struct pci_dev *dev, int func_dvsec,
u32 tag_first, u32 tag_count);
extern void ocxl_config_set_afu_actag(struct pci_dev *dev, int afu_control,
int actag_base, int actag_count);
extern void ocxl_config_set_afu_state(struct pci_dev *dev, int afu_control,
int enable);
extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
extern int ocxl_config_terminate_pasid(struct pci_dev *dev, int afu_control,
int pasid);
extern int ocxl_link_setup(struct pci_dev *dev, int PE_mask,
void **link_handle);
extern void ocxl_link_release(struct pci_dev *dev, void *link_handle);
extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
u64 amr, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data);
extern int ocxl_link_remove_pe(void *link_handle, int pasid);
extern int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
u64 *addr);
extern void ocxl_link_free_irq(void *link_handle, int hw_irq);
extern int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size);
extern void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
extern int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size);
extern void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size);
extern struct ocxl_context *ocxl_context_alloc(void);
extern int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
struct address_space *mapping);
extern int ocxl_context_attach(struct ocxl_context *ctx, u64 amr);
extern int ocxl_context_mmap(struct ocxl_context *ctx,
struct vm_area_struct *vma);
extern int ocxl_context_detach(struct ocxl_context *ctx);
extern void ocxl_context_detach_all(struct ocxl_afu *afu);
extern void ocxl_context_free(struct ocxl_context *ctx);
extern int ocxl_sysfs_add_afu(struct ocxl_afu *afu);
extern void ocxl_sysfs_remove_afu(struct ocxl_afu *afu);
#endif /* _OCXL_INTERNAL_H_ */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include "ocxl_internal.h"
struct id_range {
struct list_head list;
u32 start;
u32 end;
};
#ifdef DEBUG
static void dump_list(struct list_head *head, char *type_str)
{
struct id_range *cur;
pr_debug("%s ranges allocated:\n", type_str);
list_for_each_entry(cur, head, list) {
pr_debug("Range %d->%d\n", cur->start, cur->end);
}
}
#endif
/*
 * First-fit allocator: walk the sorted list of busy ranges and take
 * the first hole large enough for 'size' consecutive IDs. Returns the
 * start of the new range, or -ENOSPC if no hole of that size exists
 * below max_id.
 */
static int range_alloc(struct list_head *head, u32 size, int max_id,
		char *type_str)
{
struct list_head *pos;
struct id_range *cur, *new;
int rc, last_end;
new = kmalloc(sizeof(struct id_range), GFP_KERNEL);
if (!new)
return -ENOMEM;
pos = head;
last_end = -1;
list_for_each_entry(cur, head, list) {
if ((cur->start - last_end) > size)
break;
last_end = cur->end;
pos = &cur->list;
}
new->start = last_end + 1;
new->end = new->start + size - 1;
if (new->end > max_id) {
kfree(new);
rc = -ENOSPC;
} else {
list_add(&new->list, pos);
rc = new->start;
}
#ifdef DEBUG
dump_list(head, type_str);
#endif
return rc;
}
static void range_free(struct list_head *head, u32 start, u32 size,
char *type_str)
{
bool found = false;
struct id_range *cur, *tmp;
list_for_each_entry_safe(cur, tmp, head, list) {
if (cur->start == start && cur->end == (start + size - 1)) {
found = true;
list_del(&cur->list);
kfree(cur);
break;
}
}
WARN_ON(!found);
#ifdef DEBUG
dump_list(head, type_str);
#endif
}
int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size)
{
int max_pasid;
if (fn->config.max_pasid_log < 0)
return -ENOSPC;
max_pasid = 1 << fn->config.max_pasid_log;
return range_alloc(&fn->pasid_list, size, max_pasid, "afu pasid");
}
void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size)
{
return range_free(&fn->pasid_list, start, size, "afu pasid");
}
int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size)
{
int max_actag;
max_actag = fn->actag_enabled;
return range_alloc(&fn->actag_list, size, max_actag, "afu actag");
}
void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size)
{
return range_free(&fn->actag_list, start, size, "afu actag");
}
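The two allocators above are thin wrappers around that first-fit search over a sorted list of busy ranges. The probe code that consumes them lives in pci.c, which is not shown here, so the following call-site sketch is an assumption based only on the signatures in ocxl_internal.h: reserve a block of PASIDs for an AFU, then advertise it through the AFU control DVSEC.

	/* sketch: names and surrounding context are assumed, not from this diff */
	int pasid_base = ocxl_pasid_afu_alloc(fn,
				1 << afu->config.pasid_supported_log);
	if (pasid_base < 0)
		return pasid_base; /* -ENOSPC when the function's PASID space is full */
	ocxl_config_set_afu_pasid(dev, afu->config.dvsec_afu_control_pos,
				pasid_base, afu->config.pasid_supported_log);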
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sysfs.h>
#include "ocxl_internal.h"
static ssize_t global_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_ocxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.global_mmio_size);
}
static ssize_t pp_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_ocxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.pp_mmio_stride);
}
static ssize_t afu_version_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_ocxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n",
afu->config.version_major,
afu->config.version_minor);
}
static ssize_t contexts_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_ocxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d/%d\n",
afu->pasid_count, afu->pasid_max);
}
static struct device_attribute afu_attrs[] = {
__ATTR_RO(global_mmio_size),
__ATTR_RO(pp_mmio_size),
__ATTR_RO(afu_version),
__ATTR_RO(contexts),
};
static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ocxl_afu *afu = to_ocxl_afu(kobj_to_dev(kobj));
if (count == 0 || off < 0 ||
off >= afu->config.global_mmio_size)
return 0;
memcpy_fromio(buf, afu->global_mmio_ptr + off, count);
return count;
}
static int global_mmio_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_afu *afu = vma->vm_private_data;
unsigned long offset;
if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))
return VM_FAULT_SIGBUS;
offset = vmf->pgoff;
offset += (afu->global_mmio_start >> PAGE_SHIFT);
vm_insert_pfn(vma, vmf->address, offset);
return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct global_mmio_vmops = {
.fault = global_mmio_fault,
};
static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
struct vm_area_struct *vma)
{
struct ocxl_afu *afu = to_ocxl_afu(kobj_to_dev(kobj));
if ((vma_pages(vma) + vma->vm_pgoff) >
(afu->config.global_mmio_size >> PAGE_SHIFT))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &global_mmio_vmops;
vma->vm_private_data = afu;
return 0;
}
int ocxl_sysfs_add_afu(struct ocxl_afu *afu)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
rc = device_create_file(&afu->dev, &afu_attrs[i]);
if (rc)
goto err;
}
sysfs_attr_init(&afu->attr_global_mmio.attr);
afu->attr_global_mmio.attr.name = "global_mmio_area";
afu->attr_global_mmio.attr.mode = 0600;
afu->attr_global_mmio.size = afu->config.global_mmio_size;
afu->attr_global_mmio.read = global_mmio_read;
afu->attr_global_mmio.mmap = global_mmio_mmap;
rc = device_create_bin_file(&afu->dev, &afu->attr_global_mmio);
if (rc) {
dev_err(&afu->dev,
"Unable to create global mmio attr for afu: %d\n",
rc);
goto err;
}
return 0;
err:
for (i--; i >= 0; i--)
device_remove_file(&afu->dev, &afu_attrs[i]);
return rc;
}
void ocxl_sysfs_remove_afu(struct ocxl_afu *afu)
{
int i;
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
device_remove_file(&afu->dev, &afu_attrs[i]);
device_remove_bin_file(&afu->dev, &afu->attr_global_mmio);
}
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/* Copyright 2017 IBM Corp. */
#ifndef _UAPI_MISC_OCXL_H
#define _UAPI_MISC_OCXL_H
#include <linux/types.h>
#include <linux/ioctl.h>
enum ocxl_event_type {
OCXL_AFU_EVENT_XSL_FAULT_ERROR = 0,
};
#define OCXL_KERNEL_EVENT_FLAG_LAST 0x0001 /* This is the last event pending */
struct ocxl_kernel_event_header {
__u16 type;
__u16 flags;
__u32 reserved;
};
struct ocxl_kernel_event_xsl_fault_error {
__u64 addr;
__u64 dsisr;
__u64 count;
__u64 reserved;
};
struct ocxl_ioctl_attach {
__u64 amr;
__u64 reserved1;
__u64 reserved2;
__u64 reserved3;
};
/* ioctl numbers */
#define OCXL_MAGIC 0xCA
/* AFU devices */
#define OCXL_IOCTL_ATTACH _IOW(OCXL_MAGIC, 0x10, struct ocxl_ioctl_attach)
#endif /* _UAPI_MISC_OCXL_H */