Commit b9bb6fb7 authored by Linus Torvalds

Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio updates from Rusty Russell:
 "Some virtio internal cleanups, a new virtio device "virtio input", and
  a change to allow the legacy virtio balloon.

  Most excitingly, some lguest work! No seriously, I got some cleanup
  patches"

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  virtio: drop virtio_device_is_legacy_only
  virtio_pci: support non-legacy balloon devices
  virtio_mmio: support non-legacy balloon devices
  virtio_ccw: support non-legacy balloon devices
  virtio: balloon might not be a legacy device
  virtio_balloon: transitional interface
  virtio_ring: Update weak barriers to use dma_wmb/rmb
  virtio_pci_modern: switch to type-safe io accessors
  virtio_pci_modern: type-safe io accessors
  lguest: handle traps on the "interrupt suppressed" iret instruction.
  virtio: drop a useless config read
  virtio_config: reorder functions
  Add virtio-input driver.
  lguest: suppress interrupts for single insn, not range.
  lguest: simplify lguest_iret
  lguest: rename i386_head.S in the comments
  lguest: explicitly set miscdevice's private_data NULL
  lguest: fix pending interrupt test.
parents 15ce2658 9abbfb48
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10517,6 +10517,12 @@ S:	Maintained
 F:	drivers/vhost/
 F:	include/uapi/linux/vhost.h
 
+VIRTIO INPUT DRIVER
+M:	Gerd Hoffmann <kraxel@redhat.com>
+S:	Maintained
+F:	drivers/virtio/virtio_input.c
+F:	include/uapi/linux/virtio_input.h
+
 VIA RHINE NETWORK DRIVER
 M:	Roger Luethi <rl@hellgate.ch>
 S:	Maintained

--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -20,13 +20,10 @@ extern unsigned long switcher_addr;
 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
 
-/* Declarations for definitions in lguest_guest.S */
-extern char lguest_noirq_start[], lguest_noirq_end[];
+/* Declarations for definitions in arch/x86/lguest/head_32.S */
+extern char lguest_noirq_iret[];
 extern const char lgstart_cli[], lgend_cli[];
-extern const char lgstart_sti[], lgend_sti[];
-extern const char lgstart_popf[], lgend_popf[];
 extern const char lgstart_pushf[], lgend_pushf[];
-extern const char lgstart_iret[], lgend_iret[];
 extern void lguest_iret(void);
 extern void lguest_init(void);

--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -87,8 +87,7 @@
 struct lguest_data lguest_data = {
 	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
-	.noirq_start = (u32)lguest_noirq_start,
-	.noirq_end = (u32)lguest_noirq_end,
+	.noirq_iret = (u32)lguest_noirq_iret,
 	.kernel_address = PAGE_OFFSET,
 	.blocked_interrupts = { 1 }, /* Block timer interrupts */
 	.syscall_vec = SYSCALL_VECTOR,
@@ -262,7 +261,7 @@ PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
 PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
 /*:*/
 
-/* These are in i386_head.S */
+/* These are in head_32.S */
 extern void lg_irq_enable(void);
 extern void lg_restore_fl(unsigned long flags);
@@ -1368,7 +1367,7 @@ static void lguest_restart(char *reason)
  * fit comfortably.
  *
  * First we need assembly templates of each of the patchable Guest operations,
- * and these are in i386_head.S.
+ * and these are in head_32.S.
  */
 
 /*G:060 We construct a table from the assembler templates: */

--- a/arch/x86/lguest/head_32.S
+++ b/arch/x86/lguest/head_32.S
@@ -84,7 +84,7 @@ ENTRY(lg_irq_enable)
 	 * set lguest_data.irq_pending to X86_EFLAGS_IF.  If it's not zero, we
 	 * jump to send_interrupts, otherwise we're done.
 	 */
-	testl	$0, lguest_data+LGUEST_DATA_irq_pending
+	cmpl	$0, lguest_data+LGUEST_DATA_irq_pending
 	jnz	send_interrupts
 	/*
 	 * One cool thing about x86 is that you can do many things without using
@@ -133,9 +133,8 @@ ENTRY(lg_restore_fl)
 	ret
 /*:*/
 
-/* These demark the EIP range where host should never deliver interrupts. */
-.global lguest_noirq_start
-.global lguest_noirq_end
+/* These demark the EIP where host should never deliver interrupts. */
+.global lguest_noirq_iret
 
 /*M:004
  * When the Host reflects a trap or injects an interrupt into the Guest, it
@@ -168,29 +167,26 @@ ENTRY(lg_restore_fl)
  * So we have to copy eflags from the stack to lguest_data.irq_enabled before
  * we do the "iret".
  *
- * There are two problems with this: firstly, we need to use a register to do
- * the copy and secondly, the whole thing needs to be atomic.  The first
- * problem is easy to solve: push %eax on the stack so we can use it, and then
- * restore it at the end just before the real "iret".
+ * There are two problems with this: firstly, we can't clobber any registers
+ * and secondly, the whole thing needs to be atomic.  The first problem
+ * is solved by using "push memory"/"pop memory" instruction pair for copying.
  *
  * The second is harder: copying eflags to lguest_data.irq_enabled will turn
  * interrupts on before we're finished, so we could be interrupted before we
- * return to userspace or wherever.  Our solution to this is to surround the
- * code with lguest_noirq_start: and lguest_noirq_end: labels.  We tell the
+ * return to userspace or wherever.  Our solution to this is to tell the
  * Host that it is *never* to interrupt us there, even if interrupts seem to be
- * enabled.
+ * enabled. (It's not necessary to protect pop instruction, since
+ * data gets updated only after it completes, so we only need to protect
+ * one instruction, iret).
  */
 ENTRY(lguest_iret)
-	pushl	%eax
-	movl	12(%esp), %eax
-lguest_noirq_start:
+	pushl	2*4(%esp)
 	/*
 	 * Note the %ss: segment prefix here.  Normal data accesses use the
 	 * "ds" segment, but that will have already been restored for whatever
 	 * we're returning to (such as userspace): we can't trust it.  The %ss:
 	 * prefix makes sure we use the stack segment, which is still valid.
 	 */
-	movl	%eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
-	popl	%eax
+	popl	%ss:lguest_data+LGUEST_DATA_irq_enabled
+lguest_noirq_iret:
 	iret
-lguest_noirq_end:

--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -211,10 +211,9 @@ static void initialize(struct lg_cpu *cpu)
 
 	/*
 	 * The Guest tells us where we're not to deliver interrupts by putting
-	 * the range of addresses into "struct lguest_data".
+	 * the instruction address into "struct lguest_data".
 	 */
-	if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
-	    || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+	if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
 	/*

--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -56,21 +56,16 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
 }
 
 /*H:210
- * The set_guest_interrupt() routine actually delivers the interrupt or
- * trap.  The mechanics of delivering traps and interrupts to the Guest are the
- * same, except some traps have an "error code" which gets pushed onto the
- * stack as well: the caller tells us if this is one.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap.  It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap.  The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
  *
  * We set up the stack just like the CPU does for a real interrupt, so it's
  * identical for the Guest (and the standard "iret" instruction will undo
  * it).
  */
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
-				bool has_err)
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
 {
 	unsigned long gstack, origstack;
 	u32 eflags, ss, irq_enable;
@@ -130,12 +125,28 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
 	if (has_err)
 		push_guest_stack(cpu, &gstack, cpu->regs->errcode);
 
-	/*
-	 * Now we've pushed all the old state, we change the stack, the code
-	 * segment and the address to execute.
-	 */
+	/* Adjust the stack pointer and stack segment. */
 	cpu->regs->ss = ss;
 	cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap.  It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+	/* If we're already in the kernel, we don't change stacks. */
+	if ((cpu->regs->ss&0x3) != GUEST_PL)
+		cpu->regs->ss = cpu->esp1;
+
+	/*
+	 * Set the code segment and the address to execute.
+	 */
 	cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
 	cpu->regs->eip = idt_address(lo, hi);
@@ -158,6 +169,24 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
 		kill_guest(cpu, "Disabling interrupts");
 }
 
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+	/* This is the physical address of the stack. */
+	unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+	/*
+	 * Stack looks like this:
+	 * Address	Contents
+	 *  esp		EIP
+	 *  esp + 4	CS
+	 *  esp + 8	EFLAGS
+	 */
+	cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+	cpu->regs->eflags &=
+		~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
 /*H:205
  * Virtual Interrupts.
  *
@@ -200,14 +229,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
 
 	BUG_ON(irq >= LGUEST_IRQS);
 
-	/*
-	 * They may be in the middle of an iret, where they asked us never to
-	 * deliver interrupts.
-	 */
-	if (cpu->regs->eip >= cpu->lg->noirq_start &&
-	    (cpu->regs->eip < cpu->lg->noirq_end))
-		return;
-
 	/* If they're halted, interrupts restart them. */
 	if (cpu->halted) {
 		/* Re-enable interrupts. */
@@ -237,12 +258,34 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
 	if (idt_present(idt->a, idt->b)) {
 		/* OK, mark it no longer pending and deliver it. */
 		clear_bit(irq, cpu->irqs_pending);
 		/*
-		 * set_guest_interrupt() takes the interrupt descriptor and a
-		 * flag to say whether this interrupt pushes an error code onto
-		 * the stack as well: virtual interrupts never do.
+		 * They may be about to iret, where they asked us never to
+		 * deliver interrupts.  In this case, we can emulate that iret
+		 * then immediately deliver the interrupt.  This is basically
+		 * a noop: the iret would pop the interrupt frame and restore
+		 * eflags, and then we'd set it up again.  So just restore the
+		 * eflags word and jump straight to the handler in this case.
+		 *
+		 * Denys Vlasenko points out that this isn't quite right: if
+		 * the iret was returning to userspace, then that interrupt
+		 * would reset the stack pointer (which the Guest told us
+		 * about via LHCALL_SET_STACK).  But unless the Guest is being
+		 * *really* weird, that will be the same as the current stack
+		 * anyway.
 		 */
-		set_guest_interrupt(cpu, idt->a, idt->b, false);
+		if (cpu->regs->eip == cpu->lg->noirq_iret) {
+			restore_eflags(cpu);
+		} else {
+			/*
+			 * set_guest_interrupt() takes a flag to say whether
+			 * this interrupt pushes an error code onto the stack
+			 * as well: virtual interrupts never do.
+			 */
+			push_guest_interrupt_stack(cpu, false);
+		}
+		/* Actually make Guest cpu jump to handler. */
+		guest_run_interrupt(cpu, idt->a, idt->b);
 	}
 
 	/*
@@ -353,8 +396,9 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
 	 */
 	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
 		return false;
-	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
-			    cpu->arch.idt[num].b, has_err(num));
+	push_guest_interrupt_stack(cpu, has_err(num));
+	guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+			    cpu->arch.idt[num].b);
 	return true;
 }
@@ -395,8 +439,9 @@ static bool direct_trap(unsigned int num)
  * The Guest has the ability to turn its interrupt gates into trap gates,
  * if it is careful.  The Host will let trap gates can go directly to the
  * Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate.  It can do this by pointing the trap gate at instructions
- * within noirq_start and noirq_end, where it can safely disable interrupts.
+ * interrupt gate.  The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
  */
 
 /*M:006

--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -102,7 +102,7 @@ struct lguest {
 	struct pgdir pgdirs[4];
 
-	unsigned long noirq_start, noirq_end;
+	unsigned long noirq_iret;
 
 	unsigned int stack_pages;
 	u32 tsc_khz;

--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -339,6 +339,13 @@ static ssize_t write(struct file *file, const char __user *in,
 	}
 }
 
+static int open(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+
+	return 0;
+}
+
 /*L:060
  * The final piece of interface code is the close() routine.  It reverses
  * everything done in initialize().  This is usually called because the
@@ -409,6 +416,7 @@ static int close(struct inode *inode, struct file *file)
  */
 static const struct file_operations lguest_fops = {
 	.owner	 = THIS_MODULE,
+	.open	 = open,
 	.release = close,
 	.write	 = write,
 	.read	 = read,

--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1201,13 +1201,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 	vcdev->vdev.id.vendor = cdev->id.cu_type;
 	vcdev->vdev.id.device = cdev->id.cu_model;
 
-	if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
-		vcdev->revision = 0;
-	} else {
-		ret = virtio_ccw_set_transport_rev(vcdev);
-		if (ret)
-			goto out_free;
-	}
+	ret = virtio_ccw_set_transport_rev(vcdev);
+	if (ret)
+		goto out_free;
 
 	ret = register_virtio_device(&vcdev->vdev);
 	if (ret) {

--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -48,6 +48,16 @@ config VIRTIO_BALLOON
 
 	 If unsure, say M.
 
+config VIRTIO_INPUT
+	tristate "Virtio input driver"
+	depends on VIRTIO
+	depends on INPUT
+	---help---
+	 This driver supports virtio input devices such as
+	 keyboards, mice and tablets.
+
+	 If unsure, say M.
+
 config VIRTIO_MMIO
 	tristate "Platform bus driver for memory mapped virtio devices"
 	depends on HAS_IOMEM

--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
 virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o

--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -278,12 +278,6 @@ static struct bus_type virtio_bus = {
 	.remove = virtio_dev_remove,
 };
 
-bool virtio_device_is_legacy_only(struct virtio_device_id id)
-{
-	return id.device == VIRTIO_ID_BALLOON;
-}
-EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
-
 int register_virtio_driver(struct virtio_driver *driver)
 {
 	/* Catch this early. */

--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -214,8 +214,8 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
 			       u16 tag, u64 val)
 {
 	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
-	vb->stats[idx].tag = tag;
-	vb->stats[idx].val = val;
+	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
+	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
 }
 
 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
@@ -283,18 +283,27 @@ static void virtballoon_changed(struct virtio_device *vdev)
 
 static inline s64 towards_target(struct virtio_balloon *vb)
 {
-	__le32 v;
 	s64 target;
+	u32 num_pages;
 
-	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
+		     &num_pages);
 
-	target = le32_to_cpu(v);
+	/* Legacy balloon config space is LE, unlike all other devices. */
+	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+		num_pages = le32_to_cpu((__force __le32)num_pages);
+
+	target = num_pages;
 	return target - vb->num_pages;
 }
 
 static void update_balloon_size(struct virtio_balloon *vb)
 {
-	__le32 actual = cpu_to_le32(vb->num_pages);
+	u32 actual = vb->num_pages;
+
+	/* Legacy balloon config space is LE, unlike all other devices. */
+	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+		actual = (__force u32)cpu_to_le32(actual);
 
 	virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
 		      &actual);
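
A note on the conversions used above: cpu_to_virtio16()/cpu_to_virtio64() resolve their byte order from the negotiated feature bits, which is exactly why the balloon config space needs its own special-casing. As a rough sketch (illustrative only, not code from this series; the real helpers live in include/linux/virtio_byteorder.h), the 16-bit conversion behaves like:

static inline __virtio16 sketch_cpu_to_virtio16(struct virtio_device *vdev,
						u16 val)
{
	/* Modern (VIRTIO_F_VERSION_1) devices are always little-endian. */
	if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return (__force __virtio16)cpu_to_le16(val);
	/* Legacy devices use the guest's native byte order. */
	return (__force __virtio16)val;
}

The legacy balloon's config space predates this rule and was specified as little-endian regardless, hence the explicit le32 conversions in towards_target() and update_balloon_size() rather than a virtio32 helper.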

[diff for the new file drivers/virtio/virtio_input.c is collapsed in this view and not shown]

--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -581,14 +581,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-	/* Reject legacy-only IDs for version 2 devices */
-	if (vm_dev->version == 2 &&
-			virtio_device_is_legacy_only(vm_dev->vdev.id)) {
-		dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
-				vm_dev->vdev.id.device);
-		return -ENODEV;
-	}
-
 	if (vm_dev->version == 1)
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -20,6 +20,50 @@
 #define VIRTIO_PCI_NO_LEGACY
 #include "virtio_pci_common.h"
 
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(u8 __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static inline u16 vp_ioread16(u16 __iomem *addr)
+{
+	return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(u32 __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+	iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+{
+	iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+{
+	iowrite32(value, addr);
+}
+
+static void vp_iowrite64_twopart(u64 val,
+				 __le32 __iomem *lo, __le32 __iomem *hi)
+{
+	vp_iowrite32((u32)val, lo);
+	vp_iowrite32(val >> 32, hi);
+}
+
 static void __iomem *map_capability(struct pci_dev *dev, int off,
 				    size_t minlen,
 				    u32 align,
@@ -94,22 +138,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
 	return p;
 }
 
-static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
-{
-	iowrite32((u32)val, lo);
-	iowrite32(val >> 32, hi);
-}
-
 /* virtio config->get_features() implementation */
 static u64 vp_get_features(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	u64 features;
 
-	iowrite32(0, &vp_dev->common->device_feature_select);
-	features = ioread32(&vp_dev->common->device_feature);
-	iowrite32(1, &vp_dev->common->device_feature_select);
-	features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+	vp_iowrite32(0, &vp_dev->common->device_feature_select);
+	features = vp_ioread32(&vp_dev->common->device_feature);
+	vp_iowrite32(1, &vp_dev->common->device_feature_select);
+	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
 
 	return features;
 }
@@ -128,10 +166,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
 		return -EINVAL;
 	}
 
-	iowrite32(0, &vp_dev->common->guest_feature_select);
-	iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
-	iowrite32(1, &vp_dev->common->guest_feature_select);
-	iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
+	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
+	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
 
 	return 0;
 }
@@ -210,14 +248,14 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
 static u32 vp_generation(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	return ioread8(&vp_dev->common->config_generation);
+	return vp_ioread8(&vp_dev->common->config_generation);
 }
 
 /* config->{get,set}_status() implementations */
 static u8 vp_get_status(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	return ioread8(&vp_dev->common->device_status);
+	return vp_ioread8(&vp_dev->common->device_status);
 }
 
 static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -225,17 +263,17 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* We should never be setting status to 0. */
 	BUG_ON(status == 0);
-	iowrite8(status, &vp_dev->common->device_status);
+	vp_iowrite8(status, &vp_dev->common->device_status);
 }
 
 static void vp_reset(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* 0 status means a reset. */
-	iowrite8(0, &vp_dev->common->device_status);
+	vp_iowrite8(0, &vp_dev->common->device_status);
 	/* Flush out the status write, and flush in device writes,
 	 * including MSI-X interrupts, if any. */
-	ioread8(&vp_dev->common->device_status);
+	vp_ioread8(&vp_dev->common->device_status);
 	/* Flush pending VQ/configuration callbacks. */
 	vp_synchronize_vectors(vdev);
 }
@@ -243,10 +281,10 @@ static void vp_reset(struct virtio_device *vdev)
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 {
 	/* Setup the vector used for configuration events */
-	iowrite16(vector, &vp_dev->common->msix_config);
+	vp_iowrite16(vector, &vp_dev->common->msix_config);
 	/* Verify we had enough resources to assign the vector */
 	/* Will also flush the write out to device */
-	return ioread16(&vp_dev->common->msix_config);
+	return vp_ioread16(&vp_dev->common->msix_config);
 }
 
 static size_t vring_pci_size(u16 num)
@@ -286,15 +324,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	u16 num, off;
 	int err;
 
-	if (index >= ioread16(&cfg->num_queues))
+	if (index >= vp_ioread16(&cfg->num_queues))
 		return ERR_PTR(-ENOENT);
 
 	/* Select the queue we're interested in */
-	iowrite16(index, &cfg->queue_select);
+	vp_iowrite16(index, &cfg->queue_select);
 
 	/* Check if queue is either not available or already active. */
-	num = ioread16(&cfg->queue_size);
-	if (!num || ioread16(&cfg->queue_enable))
+	num = vp_ioread16(&cfg->queue_size);
+	if (!num || vp_ioread16(&cfg->queue_enable))
 		return ERR_PTR(-ENOENT);
 
 	if (num & (num - 1)) {
@@ -303,7 +341,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	/* get offset of notification word for this vq */
-	off = ioread16(&cfg->queue_notify_off);
+	off = vp_ioread16(&cfg->queue_notify_off);
 
 	info->num = num;
 	info->msix_vector = msix_vec;
@@ -322,13 +360,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	/* activate the queue */
-	iowrite16(num, &cfg->queue_size);
-	iowrite64_twopart(virt_to_phys(info->queue),
-			  &cfg->queue_desc_lo, &cfg->queue_desc_hi);
-	iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
-			  &cfg->queue_avail_lo, &cfg->queue_avail_hi);
-	iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
-			  &cfg->queue_used_lo, &cfg->queue_used_hi);
+	vp_iowrite16(num, &cfg->queue_size);
+	vp_iowrite64_twopart(virt_to_phys(info->queue),
+			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+			     &cfg->queue_used_lo, &cfg->queue_used_hi);
 
 	if (vp_dev->notify_base) {
 		/* offset should not wrap */
@@ -357,8 +395,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
-		iowrite16(msix_vec, &cfg->queue_msix_vector);
-		msix_vec = ioread16(&cfg->queue_msix_vector);
+		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
+		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			err = -EBUSY;
 			goto err_assign_vector;
@@ -393,8 +431,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	 * this, there's no way to go back except reset.
 	 */
 	list_for_each_entry(vq, &vdev->vqs, list) {
-		iowrite16(vq->index, &vp_dev->common->queue_select);
-		iowrite16(1, &vp_dev->common->queue_enable);
+		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+		vp_iowrite16(1, &vp_dev->common->queue_enable);
 	}
 
 	return 0;
@@ -405,13 +443,13 @@ static void del_vq(struct virtio_pci_vq_info *info)
 	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
-	iowrite16(vq->index, &vp_dev->common->queue_select);
+	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
 	if (vp_dev->msix_enabled) {
-		iowrite16(VIRTIO_MSI_NO_VECTOR,
-			  &vp_dev->common->queue_msix_vector);
+		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
+			     &vp_dev->common->queue_msix_vector);
 		/* Flush the write out to device */
-		ioread16(&vp_dev->common->queue_msix_vector);
+		vp_ioread16(&vp_dev->common->queue_msix_vector);
 	}
 
 	if (!vp_dev->notify_base)
@@ -577,9 +615,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 	}
 	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
 
-	if (virtio_device_is_legacy_only(vp_dev->vdev.id))
-		return -ENODEV;
-
 	/* check for a common config: if not, use legacy mode (bar 0). */
 	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
 					    IORESOURCE_IO | IORESOURCE_MEM);
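
The point of the vp_ioread*/vp_iowrite* wrappers introduced at the top of this file is compile-time width checking: each accessor only accepts a pointer to a field of the matching size, so using the wrong accessor on a register now draws a compiler diagnostic (an error under -Werror) instead of silently issuing a wrong-sized bus access. A hypothetical misuse, for illustration only (device_status is a u8 field of struct virtio_pci_common_cfg):

	vp_iowrite16(1, &vp_dev->common->device_status);
	/*
	 * -> warning: passing 'u8 __iomem *' to parameter of type
	 *    'u16 __iomem *'.  The old iowrite16() took a plain
	 *    void __iomem *, so the same mistake would have compiled
	 *    and performed a 16-bit write over two adjacent registers.
	 */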

--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -61,8 +61,8 @@ struct lguest_data {
 	u32 tsc_khz;
 
 /* Fields initialized by the Guest at boot: */
-	/* Instruction range to suppress interrupts even if enabled */
-	unsigned long noirq_start, noirq_end;
+	/* Instruction to suppress interrupts even if enabled */
+	unsigned long noirq_iret;
 	/* Address above which page tables are all identical. */
 	unsigned long kernel_address;
 	/* The vector to try to use for system calls (0x40 or 0x80). */

--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -108,8 +108,6 @@ struct virtio_device {
 	void *priv;
 };
 
-bool virtio_device_is_legacy_only(struct virtio_device_id id);
-
 static inline struct virtio_device *dev_to_virtio(struct device *_dev)
 {
 	return container_of(_dev, struct virtio_device, dev);

--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -298,13 +298,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 	} \
 } while(0)
 
-static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
-{
-	u8 ret;
-	vdev->config->get(vdev, offset, &ret, sizeof(ret));
-	return ret;
-}
-
 /* Read @count fields, @bytes each. */
 static inline void __virtio_cread_many(struct virtio_device *vdev,
 				       unsigned int offset,
@@ -326,7 +319,6 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
 	} while (gen != old);
 }
 
-
 static inline void virtio_cread_bytes(struct virtio_device *vdev,
 				      unsigned int offset,
 				      void *buf, size_t len)
@@ -334,6 +326,13 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
 	__virtio_cread_many(vdev, offset, buf, len, 1);
 }
 
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+	u8 ret;
+	vdev->config->get(vdev, offset, &ret, sizeof(ret));
+	return ret;
+}
+
 static inline void virtio_cwrite8(struct virtio_device *vdev,
 				  unsigned int offset, u8 val)
 {
@@ -374,7 +373,6 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
 				 unsigned int offset)
 {
 	u64 ret;
-	vdev->config->get(vdev, offset, &ret, sizeof(ret));
 	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
 	return virtio64_to_cpu(vdev, (__force __virtio64)ret);
 }
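
Two of the virtio_config.h changes above are related: virtio_cread64() loses its extra vdev->config->get() call because __virtio_cread_many() already performs the read, inside a retry loop keyed on the device's optional config generation counter, so a multi-byte read racing a device-side config change is retried. A simplified standalone sketch of that loop (hypothetical helper name; the real logic is __virtio_cread_many() itself):

static inline u64 sketch_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	u32 old, gen;
	u64 val;

	do {
		/* Devices without a generation counter report 0. */
		old = vdev->config->generation ?
		      vdev->config->generation(vdev) : 0;
		vdev->config->get(vdev, offset, &val, sizeof(val));
		gen = vdev->config->generation ?
		      vdev->config->generation(vdev) : 0;
	} while (gen != old);	/* config changed mid-read: retry */

	return val;
}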

--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -21,19 +21,20 @@
  * actually quite cheap.
  */
 
-#ifdef CONFIG_SMP
 static inline void virtio_mb(bool weak_barriers)
 {
+#ifdef CONFIG_SMP
 	if (weak_barriers)
 		smp_mb();
 	else
+#endif
 		mb();
 }
 
 static inline void virtio_rmb(bool weak_barriers)
 {
 	if (weak_barriers)
-		smp_rmb();
+		dma_rmb();
 	else
 		rmb();
 }
@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers)
 static inline void virtio_wmb(bool weak_barriers)
 {
 	if (weak_barriers)
-		smp_wmb();
+		dma_wmb();
 	else
 		wmb();
 }
-#else
-static inline void virtio_mb(bool weak_barriers)
-{
-	mb();
-}
-
-static inline void virtio_rmb(bool weak_barriers)
-{
-	rmb();
-}
-
-static inline void virtio_wmb(bool weak_barriers)
-{
-	wmb();
-}
-#endif
 
 struct virtio_device;
 struct virtqueue;
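
The dma_rmb()/dma_wmb() pair is enough for the weak-barrier case because both sides of the ordering are accesses to coherent memory shared with the device, not MMIO. A simplified sketch of the store ordering virtio_wmb() exists to enforce (hypothetical types and helper; the real publication code is in drivers/virtio/virtio_ring.c and also handles virtio endianness):

struct sketch_avail {
	u16 flags;
	u16 idx;	/* free-running index, read by the device */
	u16 ring[];	/* descriptor heads */
};

static void sketch_publish(struct sketch_avail *avail, unsigned int num,
			   u16 head, bool weak_barriers)
{
	/* Step 1: fill in the ring entry... */
	avail->ring[avail->idx % num] = head;
	/* ...which must be visible before step 2 publishes it. */
	virtio_wmb(weak_barriers);
	/* Step 2: advance the index; the device may now use the entry. */
	avail->idx++;
}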

--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -431,6 +431,7 @@ header-y += virtio_blk.h
 header-y += virtio_config.h
 header-y += virtio_console.h
 header-y += virtio_ids.h
+header-y += virtio_input.h
 header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h

--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -25,6 +25,7 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE. */
+#include <linux/types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
@@ -38,9 +39,9 @@
 struct virtio_balloon_config {
 	/* Number of pages host wants Guest to give up. */
-	__le32 num_pages;
+	__u32 num_pages;
 	/* Number of pages we've actually got in balloon. */
-	__le32 actual;
+	__u32 actual;
 };
 
 #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
@@ -51,9 +52,32 @@ struct virtio_balloon_config {
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
 #define VIRTIO_BALLOON_S_NR       6
 
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes
+ * to pack it, to disable this padding. This also often causes compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ *     struct virtio_balloon_stat {
+ *         __virtio16 tag;
+ *         __u8 reserved[6];
+ *         __virtio64 val;
+ *     };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
 struct virtio_balloon_stat {
-	__u16 tag;
-	__u64 val;
+	__virtio16 tag;
+	__virtio64 val;
 } __attribute__((packed));
 
 #endif /* _LINUX_VIRTIO_BALLOON_H */
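
The arithmetic behind that layout advice: packing the legacy statistics entry keeps it at 10 bytes but leaves val misaligned, while the recommended shape spends 6 reserved bytes to get natural alignment with no packed attribute at all. A standalone illustration (hypothetical struct names):

#include <stdint.h>

struct stat_packed {			/* the legacy wire format above */
	uint16_t tag;
	uint64_t val;			/* at offset 2: misaligned */
} __attribute__((packed));

struct stat_padded {			/* the recommended alternative */
	uint16_t tag;
	uint8_t  reserved[6];		/* explicit padding out to offset 8 */
	uint64_t val;			/* naturally aligned */
};

_Static_assert(sizeof(struct stat_packed) == 10, "2 + 8, no padding");
_Static_assert(sizeof(struct stat_padded) == 16, "2 + 6 + 8, aligned");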

--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,5 +39,6 @@
 #define VIRTIO_ID_9P		9 /* 9p virtio console */
 #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
 #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
+#define VIRTIO_ID_INPUT        18 /* virtio input */
 
 #endif /* _LINUX_VIRTIO_IDS_H */

--- /dev/null
+++ b/include/uapi/linux/virtio_input.h
@@ -0,0 +1,76 @@
+#ifndef _LINUX_VIRTIO_INPUT_H
+#define _LINUX_VIRTIO_INPUT_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include <linux/types.h>
+
+enum virtio_input_config_select {
+	VIRTIO_INPUT_CFG_UNSET      = 0x00,
+	VIRTIO_INPUT_CFG_ID_NAME    = 0x01,
+	VIRTIO_INPUT_CFG_ID_SERIAL  = 0x02,
+	VIRTIO_INPUT_CFG_ID_DEVIDS  = 0x03,
+	VIRTIO_INPUT_CFG_PROP_BITS  = 0x10,
+	VIRTIO_INPUT_CFG_EV_BITS    = 0x11,
+	VIRTIO_INPUT_CFG_ABS_INFO   = 0x12,
+};
+
+struct virtio_input_absinfo {
+	__u32 min;
+	__u32 max;
+	__u32 fuzz;
+	__u32 flat;
+	__u32 res;
+};
+
+struct virtio_input_devids {
+	__u16 bustype;
+	__u16 vendor;
+	__u16 product;
+	__u16 version;
+};
+
+struct virtio_input_config {
+	__u8    select;
+	__u8    subsel;
+	__u8    size;
+	__u8    reserved[5];
+	union {
+		char string[128];
+		__u8 bitmap[128];
+		struct virtio_input_absinfo abs;
+		struct virtio_input_devids ids;
+	} u;
+};
+
+struct virtio_input_event {
+	__le16 type;
+	__le16 code;
+	__le32 value;
+};
+
+#endif /* _LINUX_VIRTIO_INPUT_H */
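
The select/subsel pair in struct virtio_input_config is a small command interface inside config space: the driver writes what it wants to query, then reads back size and the matching member of the union. A minimal sketch of a string query (hypothetical helper built on the virtio_cread*/virtio_cwrite* accessors from virtio_config.h; the real consumer is the new drivers/virtio/virtio_input.c):

static size_t sketch_input_cfg_string(struct virtio_device *vdev,
				      u8 select, u8 subsel,
				      char *buf, size_t len)
{
	u8 size;

	/* Tell the device which datum we want... */
	virtio_cwrite8(vdev, offsetof(struct virtio_input_config, select),
		       select);
	virtio_cwrite8(vdev, offsetof(struct virtio_input_config, subsel),
		       subsel);
	/* ...then see how much of the union it filled in. */
	size = virtio_cread8(vdev, offsetof(struct virtio_input_config, size));
	if (size > len)
		size = len;
	virtio_cread_bytes(vdev,
			   offsetof(struct virtio_input_config, u.string),
			   buf, size);
	return size;
}

Querying with select = VIRTIO_INPUT_CFG_ID_NAME and subsel = 0, for instance, would fetch the device name; a returned size of 0 means the device does not implement that query.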