Commit 4907a43d authored by Linus Torvalds

Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull Hyper-V updates from Wei Liu:

 - a series from Boqun Feng to support page size larger than 4K

 - a few miscellaneous clean-ups

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv: clocksource: Add notrace attribute to read_hv_sched_clock_*() functions
  x86/hyperv: Remove aliases with X64 in their name
  PCI: hv: Document missing hv_pci_protocol_negotiation() parameter
  scsi: storvsc: Support PAGE_SIZE larger than 4K
  Driver: hv: util: Use VMBUS_RING_SIZE() for ringbuffer sizes
  HID: hyperv: Use VMBUS_RING_SIZE() for ringbuffer sizes
  Input: hyperv-keyboard: Use VMBUS_RING_SIZE() for ringbuffer sizes
  hv_netvsc: Use HV_HYP_PAGE_SIZE for Hyper-V communication
  hv: hyperv.h: Introduce some hvpfn helper functions
  Drivers: hv: vmbus: Move virt_to_hvpfn() to hyperv header
  Drivers: hv: Use HV_HYP_PAGE in hv_synic_enable_regs()
  Drivers: hv: vmbus: Introduce types of GPADL
  Drivers: hv: vmbus: Move __vmbus_open()
  Drivers: hv: vmbus: Always use HV_HYP_PAGE_SIZE for gpadl
  drivers: hv: remove cast from hyperv_die_event
parents da9803df 1f3aed01
@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
 	return read_hv_clock_tsc();
 }
 
-static u64 read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_sched_clock_tsc(void)
 {
 	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
 	return read_hv_clock_msr();
 }
 
-static u64 read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_sched_clock_msr(void)
 {
 	return (read_hv_clock_msr() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
...
@@ -104,8 +104,8 @@ struct synthhid_input_report {
 #pragma pack(pop)
 
-#define INPUTVSC_SEND_RING_BUFFER_SIZE		(40 * 1024)
-#define INPUTVSC_RECV_RING_BUFFER_SIZE		(40 * 1024)
+#define INPUTVSC_SEND_RING_BUFFER_SIZE		VMBUS_RING_SIZE(36 * 1024)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE		VMBUS_RING_SIZE(36 * 1024)
 
 enum pipe_prot_msg_type {
...
@@ -22,20 +22,97 @@
 #include "hyperv_vmbus.h"
 
-#define NUM_PAGES_SPANNED(addr, len) \
-((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
-
-static unsigned long virt_to_hvpfn(void *addr)
-{
-	phys_addr_t paddr;
-
-	if (is_vmalloc_addr(addr))
-		paddr = page_to_phys(vmalloc_to_page(addr)) +
-			offset_in_page(addr);
-	else
-		paddr = __pa(addr);
-
-	return paddr >> PAGE_SHIFT;
-}
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement), however, the hypervisor only
+ * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice the gap size.
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
+{
+	switch (type) {
+	case HV_GPADL_BUFFER:
+		return size;
+	case HV_GPADL_RING:
+		/* The size of a ringbuffer must be page-aligned */
+		BUG_ON(size % PAGE_SIZE);
+		/*
+		 * Two things to notice here:
+		 * 1) We're processing two ring buffers as a unit
+		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
+		 *    the first guest-size page of each of the two ring buffers.
+		 * So we effectively subtract out two guest-size pages, and add
+		 * back two Hyper-V size pages.
+		 */
+		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+	}
+	BUG();
+	return 0;
+}
+
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
+ *                                 HV_HYP_PAGE) in a ring gpadl based on the
+ *                                 offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ *          virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
+{
+	/*
+	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+	 * header (because of the alignment requirement), however, the
+	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
+	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+	 *
+	 * And to calculate the effective send offset in gpadl, we need to
+	 * subtract this gap.
+	 */
+	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
+}
+
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
+ *                  the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ *               virtual address space of the guest
+ * @i: the index
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+				 u32 size, u32 send_offset, int i)
+{
+	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+	unsigned long delta = 0UL;
+
+	switch (type) {
+	case HV_GPADL_BUFFER:
+		break;
+	case HV_GPADL_RING:
+		if (i == 0)
+			delta = 0;
+		else if (i <= send_idx)
+			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+		else
+			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
+}
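An aside for readers (not part of the commit): the ring-gpadl arithmetic above is easy to sanity-check in isolation. The sketch below re-derives the size and send-offset math in user space; the 64K PAGE_SIZE, the two-guest-pages-per-ring layout, and all constants are assumptions for illustration, not values taken from kernel headers.

```c
/*
 * Standalone sketch of the RING gpadl math above. The constants are
 * assumptions for illustration (64K guest pages, 4K Hyper-V pages);
 * nothing here is taken from kernel headers.
 */
#include <stdio.h>

#define PAGE_SIZE         (64 * 1024UL)   /* assumed guest page size */
#define HV_HYP_PAGE_SIZE  (4 * 1024UL)    /* Hyper-V always uses 4K  */
#define HV_HYP_PAGE_SHIFT 12
#define GAP               (PAGE_SIZE - HV_HYP_PAGE_SIZE)

/* hv_gpadl_size() for the RING case: drop the unused tail of each
 * ring's guest-page-sized header. */
static unsigned long ring_gpadl_size(unsigned long size)
{
	return size - 2 * GAP;
}

/* hv_ring_gpadl_send_hvpgoffset(): the send ring contributes only one
 * 4K header page to the gpadl. */
static unsigned long send_hvpgoffset(unsigned long offset)
{
	return (offset - GAP) >> HV_HYP_PAGE_SHIFT;
}

int main(void)
{
	/* two rings of two guest pages each: 1 header + 1 data page per ring */
	unsigned long size = 4 * PAGE_SIZE;
	unsigned long send_offset = 2 * PAGE_SIZE;  /* recv ring starts here */

	printf("guest size %lu -> gpadl size %lu\n",
	       size, ring_gpadl_size(size));        /* 262144 -> 139264 */
	printf("send offset in hv pages: %lu\n",
	       send_hvpgoffset(send_offset));       /* 17 */
	return 0;
}
```

With these inputs the guest maps 64 Hyper-V-sized pages, but the gpadl contains only 34 (one 4K header plus sixteen 4K data pages per ring), and the receive ring is reported at Hyper-V page index 17 rather than 32.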
@@ -112,160 +189,6 @@ int vmbus_alloc_ring(struct vmbus_channel *newchannel,
 }
 EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
-static int __vmbus_open(struct vmbus_channel *newchannel,
-		       void *userdata, u32 userdatalen,
-		       void (*onchannelcallback)(void *context), void *context)
-{
-	struct vmbus_channel_open_channel *open_msg;
-	struct vmbus_channel_msginfo *open_info = NULL;
-	struct page *page = newchannel->ringbuffer_page;
-	u32 send_pages, recv_pages;
-	unsigned long flags;
-	int err;
-
-	if (userdatalen > MAX_USER_DEFINED_BYTES)
-		return -EINVAL;
-
-	send_pages = newchannel->ringbuffer_send_offset;
-	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
-
-	if (newchannel->state != CHANNEL_OPEN_STATE)
-		return -EINVAL;
-
-	newchannel->state = CHANNEL_OPENING_STATE;
-	newchannel->onchannel_callback = onchannelcallback;
-	newchannel->channel_callback_context = context;
-
-	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
-	if (err)
-		goto error_clean_ring;
-
-	err = hv_ringbuffer_init(&newchannel->inbound,
-				 &page[send_pages], recv_pages);
-	if (err)
-		goto error_clean_ring;
-
-	/* Establish the gpadl for the ring buffer */
-	newchannel->ringbuffer_gpadlhandle = 0;
-
-	err = vmbus_establish_gpadl(newchannel,
-				    page_address(newchannel->ringbuffer_page),
-				    (send_pages + recv_pages) << PAGE_SHIFT,
-				    &newchannel->ringbuffer_gpadlhandle);
-	if (err)
-		goto error_clean_ring;
-
-	/* Create and init the channel open message */
-	open_info = kmalloc(sizeof(*open_info) +
-			   sizeof(struct vmbus_channel_open_channel),
-			   GFP_KERNEL);
-	if (!open_info) {
-		err = -ENOMEM;
-		goto error_free_gpadl;
-	}
-
-	init_completion(&open_info->waitevent);
-	open_info->waiting_channel = newchannel;
-
-	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
-	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
-	open_msg->openid = newchannel->offermsg.child_relid;
-	open_msg->child_relid = newchannel->offermsg.child_relid;
-	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
-	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
-
-	if (userdatalen)
-		memcpy(open_msg->userdata, userdata, userdatalen);
-
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_add_tail(&open_info->msglistentry,
-		      &vmbus_connection.chn_msg_list);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-	if (newchannel->rescind) {
-		err = -ENODEV;
-		goto error_free_info;
-	}
-
-	err = vmbus_post_msg(open_msg,
-			     sizeof(struct vmbus_channel_open_channel), true);
-
-	trace_vmbus_open(open_msg, err);
-
-	if (err != 0)
-		goto error_clean_msglist;
-
-	wait_for_completion(&open_info->waitevent);
-
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_del(&open_info->msglistentry);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-	if (newchannel->rescind) {
-		err = -ENODEV;
-		goto error_free_info;
-	}
-
-	if (open_info->response.open_result.status) {
-		err = -EAGAIN;
-		goto error_free_info;
-	}
-
-	newchannel->state = CHANNEL_OPENED_STATE;
-	kfree(open_info);
-	return 0;
-
-error_clean_msglist:
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_del(&open_info->msglistentry);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-error_free_info:
-	kfree(open_info);
-error_free_gpadl:
-	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-	newchannel->ringbuffer_gpadlhandle = 0;
-error_clean_ring:
-	hv_ringbuffer_cleanup(&newchannel->outbound);
-	hv_ringbuffer_cleanup(&newchannel->inbound);
-	newchannel->state = CHANNEL_OPEN_STATE;
-	return err;
-}
-
-/*
- * vmbus_connect_ring - Open the channel but reuse ring buffer
- */
-int vmbus_connect_ring(struct vmbus_channel *newchannel,
-		       void (*onchannelcallback)(void *context), void *context)
-{
-	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
-}
-EXPORT_SYMBOL_GPL(vmbus_connect_ring);
-
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel,
-	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
-	       void *userdata, u32 userdatalen,
-	       void (*onchannelcallback)(void *context), void *context)
-{
-	int err;
-
-	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
-			       recv_ringbuffer_size);
-	if (err)
-		return err;
-
-	err = __vmbus_open(newchannel, userdata, userdatalen,
-			   onchannelcallback, context);
-	if (err)
-		vmbus_free_ring(newchannel);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(vmbus_open);
-
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
 int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
 				  const guid_t *shv_host_servie_id)
@@ -317,7 +240,8 @@ EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
 /*
  * create_gpadl_header - Creates a gpadl for the specified buffer
  */
-static int create_gpadl_header(void *kbuffer, u32 size,
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+			       u32 size, u32 send_offset,
 			       struct vmbus_channel_msginfo **msginfo)
 {
 	int i;
@@ -330,7 +254,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 
-	pagecount = size >> PAGE_SHIFT;
+	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
 
 	/* do we need a gpadl body msg */
 	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -357,10 +281,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 		gpadl_header->range_buflen = sizeof(struct gpa_range) +
 					 pagecount * sizeof(u64);
 		gpadl_header->range[0].byte_offset = 0;
-		gpadl_header->range[0].byte_count = size;
+		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
 		for (i = 0; i < pfncount; i++)
-			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-				kbuffer + PAGE_SIZE * i);
+			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+				type, kbuffer, size, send_offset, i);
 		*msginfo = msgheader;
 
 		pfnsum = pfncount;
@@ -411,8 +335,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 			 * so the hypervisor guarantees that this is ok.
 			 */
 			for (i = 0; i < pfncurr; i++)
-				gpadl_body->pfn[i] = virt_to_hvpfn(
-					kbuffer + PAGE_SIZE * (pfnsum + i));
+				gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+					kbuffer, size, send_offset, pfnsum + i);
 
 			/* add to msg header */
 			list_add_tail(&msgbody->msglistentry,
@@ -438,10 +362,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 		gpadl_header->range_buflen = sizeof(struct gpa_range) +
 					 pagecount * sizeof(u64);
 		gpadl_header->range[0].byte_offset = 0;
-		gpadl_header->range[0].byte_count = size;
+		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
 		for (i = 0; i < pagecount; i++)
-			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-				kbuffer + PAGE_SIZE * i);
+			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+				type, kbuffer, size, send_offset, i);
 
 		*msginfo = msgheader;
 	}
@@ -454,15 +378,20 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 }
 
 /*
- * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
  *
  * @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
  * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts,
+ *               should be 0 for BUFFER type gpadl
 * @gpadl_handle: some funky thing
 */
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
-			  u32 size, u32 *gpadl_handle)
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+				   enum hv_gpadl_type type, void *kbuffer,
+				   u32 size, u32 send_offset,
+				   u32 *gpadl_handle)
 {
 	struct vmbus_channel_gpadl_header *gpadlmsg;
 	struct vmbus_channel_gpadl_body *gpadl_body;
@@ -476,7 +405,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 	next_gpadl_handle =
 		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
 
-	ret = create_gpadl_header(kbuffer, size, &msginfo);
+	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
 	if (ret)
 		return ret;
@@ -557,8 +486,184 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 	kfree(msginfo);
 	return ret;
 }
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: some funky thing
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+			  u32 size, u32 *gpadl_handle)
+{
+	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+				       0U, gpadl_handle);
+}
 EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+
+static int __vmbus_open(struct vmbus_channel *newchannel,
+		       void *userdata, u32 userdatalen,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	struct vmbus_channel_open_channel *open_msg;
+	struct vmbus_channel_msginfo *open_info = NULL;
+	struct page *page = newchannel->ringbuffer_page;
+	u32 send_pages, recv_pages;
+	unsigned long flags;
+	int err;
+
+	if (userdatalen > MAX_USER_DEFINED_BYTES)
+		return -EINVAL;
+
+	send_pages = newchannel->ringbuffer_send_offset;
+	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+	if (newchannel->state != CHANNEL_OPEN_STATE)
+		return -EINVAL;
+
+	newchannel->state = CHANNEL_OPENING_STATE;
+	newchannel->onchannel_callback = onchannelcallback;
+	newchannel->channel_callback_context = context;
+
+	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+	if (err)
+		goto error_clean_ring;
+
+	err = hv_ringbuffer_init(&newchannel->inbound,
+				 &page[send_pages], recv_pages);
+	if (err)
+		goto error_clean_ring;
+
+	/* Establish the gpadl for the ring buffer */
+	newchannel->ringbuffer_gpadlhandle = 0;
+
+	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+				      page_address(newchannel->ringbuffer_page),
+				      (send_pages + recv_pages) << PAGE_SHIFT,
+				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+				      &newchannel->ringbuffer_gpadlhandle);
+	if (err)
+		goto error_clean_ring;
+
+	/* Create and init the channel open message */
+	open_info = kmalloc(sizeof(*open_info) +
+			   sizeof(struct vmbus_channel_open_channel),
+			   GFP_KERNEL);
+	if (!open_info) {
+		err = -ENOMEM;
+		goto error_free_gpadl;
+	}
+
+	init_completion(&open_info->waitevent);
+	open_info->waiting_channel = newchannel;
+
+	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
+	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+	open_msg->openid = newchannel->offermsg.child_relid;
+	open_msg->child_relid = newchannel->offermsg.child_relid;
+	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+	/*
+	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+	 * here we calculate it into HV_HYP_PAGE.
+	 */
+	open_msg->downstream_ringbuffer_pageoffset =
+		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
+	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
+
+	if (userdatalen)
+		memcpy(open_msg->userdata, userdata, userdatalen);
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_add_tail(&open_info->msglistentry,
+		      &vmbus_connection.chn_msg_list);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	if (newchannel->rescind) {
+		err = -ENODEV;
+		goto error_free_info;
+	}
+
+	err = vmbus_post_msg(open_msg,
+			     sizeof(struct vmbus_channel_open_channel), true);
+
+	trace_vmbus_open(open_msg, err);
+
+	if (err != 0)
+		goto error_clean_msglist;
+
+	wait_for_completion(&open_info->waitevent);
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_del(&open_info->msglistentry);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	if (newchannel->rescind) {
+		err = -ENODEV;
+		goto error_free_info;
+	}
+
+	if (open_info->response.open_result.status) {
+		err = -EAGAIN;
+		goto error_free_info;
+	}
+
+	newchannel->state = CHANNEL_OPENED_STATE;
+	kfree(open_info);
+	return 0;
+
+error_clean_msglist:
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_del(&open_info->msglistentry);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_free_info:
+	kfree(open_info);
+error_free_gpadl:
+	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+	newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
+	hv_ringbuffer_cleanup(&newchannel->outbound);
+	hv_ringbuffer_cleanup(&newchannel->inbound);
+	newchannel->state = CHANNEL_OPEN_STATE;
+	return err;
+}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+	       void *userdata, u32 userdatalen,
+	       void (*onchannelcallback)(void *context), void *context)
+{
+	int err;
+
+	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+			       recv_ringbuffer_size);
+	if (err)
+		return err;
+
+	err = __vmbus_open(newchannel, userdata, userdatalen,
+			   onchannelcallback, context);
+	if (err)
+		vmbus_free_ring(newchannel);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(vmbus_open);
 
 /*
  * vmbus_teardown_gpadl - Teardown the specified GPADL handle
  */
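A worked example of the conversion in __vmbus_open() (illustrative numbers, 64K PAGE_SIZE assumed): with send_pages = 2, the byte offset passed in is 2 * 65536 = 131072, and hv_ring_gpadl_send_hvpgoffset() returns (131072 - 61440) >> 12 = 17. That matches the gpadl layout: the hypervisor sees the send ring as one 4K header page plus sixteen 4K data pages, so the receive ring starts at gpadl index 17.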
...
@@ -165,7 +165,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	hv_get_simp(simp.as_uint64);
 	simp.simp_enabled = 1;
 	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
-		>> PAGE_SHIFT;
+		>> HV_HYP_PAGE_SHIFT;
 
 	hv_set_simp(simp.as_uint64);
@@ -173,7 +173,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	hv_get_siefp(siefp.as_uint64);
 	siefp.siefp_enabled = 1;
 	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
-		>> PAGE_SHIFT;
+		>> HV_HYP_PAGE_SHIFT;
 
 	hv_set_siefp(siefp.as_uint64);
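The shift changes to HV_HYP_PAGE_SHIFT because the SIMP and SIEFP base fields are part of the Hyper-V-defined synthetic MSR layout: they hold a guest physical frame number in units of the hypervisor's fixed 4K page, so the conversion must not depend on the guest's PAGE_SIZE.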
...
@@ -500,6 +500,9 @@ static void heartbeat_onchannelcallback(void *context)
 	}
 }
 
+#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+
 static int util_probe(struct hv_device *dev,
 		      const struct hv_vmbus_device_id *dev_id)
 {
@@ -530,8 +533,8 @@ static int util_probe(struct hv_device *dev,
 	hv_set_drvdata(dev, srv);
 
-	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
 			 dev->channel);
 	if (ret)
 		goto error;
@@ -590,8 +593,8 @@ static int util_resume(struct hv_device *dev)
 		return ret;
 	}
 
-	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
 			 dev->channel);
 	return ret;
 }
...
@@ -83,7 +83,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 			    void *args)
 {
-	struct die_args *die = (struct die_args *)args;
+	struct die_args *die = args;
 	struct pt_regs *regs = die->regs;
 
 	/* Don't notify Hyper-V if the die event is other than oops */
...
@@ -75,8 +75,8 @@ struct synth_kbd_keystroke {
 #define HK_MAXIMUM_MESSAGE_SIZE 256
 
-#define KBD_VSC_SEND_RING_BUFFER_SIZE	(40 * 1024)
-#define KBD_VSC_RECV_RING_BUFFER_SIZE	(40 * 1024)
+#define KBD_VSC_SEND_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)
 
 #define XTKBD_EMUL0 0xe0
 #define XTKBD_EMUL1 0xe1
...
@@ -846,7 +846,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 	}
 
 	for (i = 0; i < page_count; i++) {
-		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
 		u32 offset = pb[i].offset;
 		u32 len = pb[i].len;
...
@@ -373,32 +373,29 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return txq;
 }
 
-static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
 			struct hv_page_buffer *pb)
 {
 	int j = 0;
 
-	/* Deal with compound pages by ignoring unused part
-	 * of the page.
-	 */
-	page += (offset >> PAGE_SHIFT);
-	offset &= ~PAGE_MASK;
+	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+	offset = offset & ~HV_HYP_PAGE_MASK;
 
 	while (len > 0) {
 		unsigned long bytes;
 
-		bytes = PAGE_SIZE - offset;
+		bytes = HV_HYP_PAGE_SIZE - offset;
 		if (bytes > len)
 			bytes = len;
-		pb[j].pfn = page_to_pfn(page);
+		pb[j].pfn = hvpfn;
 		pb[j].offset = offset;
 		pb[j].len = bytes;
 
 		offset += bytes;
 		len -= bytes;
 
-		if (offset == PAGE_SIZE && len) {
-			page++;
+		if (offset == HV_HYP_PAGE_SIZE && len) {
+			hvpfn++;
 			offset = 0;
 			j++;
 		}
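As an aside, the rewritten fill_pg_buf() is self-contained enough to exercise in user space. The sketch below is illustrative only, with simplified types and locally defined constants rather than the kernel's; it shows how one (pfn, offset, len) triple is cut into 4K-granular hv_page_buffer slots:

```c
/* Illustrative sketch of the fill_pg_buf() splitting logic above.
 * Simplified standalone types/constants; not the kernel's definitions. */
#include <stdio.h>

#define HV_HYP_PAGE_SIZE  4096UL
#define HV_HYP_PAGE_SHIFT 12
#define HV_HYP_PAGE_MASK  (~(HV_HYP_PAGE_SIZE - 1))

struct hv_page_buffer { unsigned long pfn; unsigned int offset, len; };

static unsigned int fill_pg_buf(unsigned long hvpfn, unsigned int offset,
				unsigned int len, struct hv_page_buffer *pb)
{
	unsigned int j = 0;

	/* normalize so 'offset' falls within one 4K Hyper-V page */
	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
	offset &= ~HV_HYP_PAGE_MASK;

	while (len > 0) {
		unsigned long bytes = HV_HYP_PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;
		offset += bytes;
		len -= bytes;
		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;       /* advance to the next 4K page */
			offset = 0;
			j++;
		}
	}
	return j + 1;
}

int main(void)
{
	struct hv_page_buffer pb[4];
	/* 6000 bytes starting 1000 bytes into hv page 100 */
	unsigned int n = fill_pg_buf(100, 1000, 6000, pb);

	for (unsigned int i = 0; i < n; i++)
		printf("slot %u: pfn=%lu offset=%u len=%u\n",
		       i, pb[i].pfn, pb[i].offset, pb[i].len);
	return 0;
}
```

Running it splits the buffer across two slots: (pfn 100, offset 1000, len 3096) and (pfn 101, offset 0, len 2904).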
@@ -421,23 +418,26 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 	 * 2. skb linear data
 	 * 3. skb fragment data
 	 */
-	slots_used += fill_pg_buf(virt_to_page(hdr),
-				  offset_in_page(hdr),
-				  len, &pb[slots_used]);
+	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+				  offset_in_hvpage(hdr),
+				  len,
+				  &pb[slots_used]);
 
 	packet->rmsg_size = len;
 	packet->rmsg_pgcnt = slots_used;
 
-	slots_used += fill_pg_buf(virt_to_page(data),
-				  offset_in_page(data),
-				  skb_headlen(skb), &pb[slots_used]);
+	slots_used += fill_pg_buf(virt_to_hvpfn(data),
+				  offset_in_hvpage(data),
+				  skb_headlen(skb),
+				  &pb[slots_used]);
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 
-		slots_used += fill_pg_buf(skb_frag_page(frag),
-					  skb_frag_off(frag),
-					  skb_frag_size(frag), &pb[slots_used]);
+		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+					  skb_frag_off(frag),
+					  skb_frag_size(frag),
+					  &pb[slots_used]);
 	}
 	return slots_used;
 }
@@ -453,8 +453,8 @@ static int count_skb_frag_slots(struct sk_buff *skb)
 		unsigned long offset = skb_frag_off(frag);
 
 		/* Skip unused frames from start of page */
-		offset &= ~PAGE_MASK;
-		pages += PFN_UP(offset + size);
+		offset &= ~HV_HYP_PAGE_MASK;
+		pages += HVPFN_UP(offset + size);
 	}
 	return pages;
 }
@@ -462,12 +462,12 @@ static int count_skb_frag_slots(struct sk_buff *skb)
 static int netvsc_get_slots(struct sk_buff *skb)
 {
 	char *data = skb->data;
-	unsigned int offset = offset_in_page(data);
+	unsigned int offset = offset_in_hvpage(data);
 	unsigned int len = skb_headlen(skb);
 	int slots;
 	int frag_slots;
 
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
 	frag_slots = count_skb_frag_slots(skb);
 	return slots + frag_slots;
 }
...
@@ -25,7 +25,7 @@
 
 static void rndis_set_multicast(struct work_struct *w);
 
-#define RNDIS_EXT_LEN PAGE_SIZE
+#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
 struct rndis_request {
 	struct list_head list_ent;
 	struct completion wait_event;
@@ -215,18 +215,17 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	packet->page_buf_cnt = 1;
 
 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
-		PAGE_SHIFT;
+		HV_HYP_PAGE_SHIFT;
 	pb[0].len = req->request_msg.msg_len;
-	pb[0].offset =
-		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+	pb[0].offset = offset_in_hvpage(&req->request_msg);
 
 	/* Add one page_buf when request_msg crossing page boundary */
-	if (pb[0].offset + pb[0].len > PAGE_SIZE) {
+	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
 		packet->page_buf_cnt++;
-		pb[0].len = PAGE_SIZE -
+		pb[0].len = HV_HYP_PAGE_SIZE -
 			pb[0].offset;
 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
-			+ pb[0].len) >> PAGE_SHIFT;
+			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
 		pb[1].offset = 0;
 		pb[1].len = req->request_msg.msg_len -
 			pb[0].len;
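For instance (illustrative numbers): if request_msg sits at offset 3000 within a 4K Hyper-V page and msg_len is 2000, then pb[0].offset + pb[0].len = 5000 > 4096, so the message is split: pb[0] covers the 1096 bytes up to the page boundary, and pb[1] covers the remaining 904 bytes starting at offset 0 of the next 4K page.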
...
@@ -2507,7 +2507,10 @@ static void hv_pci_onchannelcallback(void *context)
 /**
  * hv_pci_protocol_negotiation() - Set up protocol
- * @hdev: VMBus's tracking struct for this root PCI bus
+ * @hdev:		VMBus's tracking struct for this root PCI bus.
+ * @version:		Array of supported channel protocol versions in
+ *			the order of probing - highest goes first.
+ * @num_version:	Number of elements in the version array.
  *
  * This driver is intended to support running on Windows 10
  * (server) and later versions. It will not run on earlier
...
@@ -1739,24 +1739,66 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	payload_sz = sizeof(cmd_request->mpb);
 
 	if (sg_count) {
-		if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+		unsigned int hvpgoff = 0;
+		unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
+		unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
+		u64 hvpfn;
 
-			payload_sz = (sg_count * sizeof(u64) +
+		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+
+			payload_sz = (hvpg_count * sizeof(u64) +
 				      sizeof(struct vmbus_packet_mpb_array));
 			payload = kzalloc(payload_sz, GFP_ATOMIC);
 			if (!payload)
 				return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
 
+		/*
+		 * sgl is a list of PAGEs, and payload->range.pfn_array
+		 * expects the page number in the unit of HV_HYP_PAGE_SIZE (the
+		 * page size that Hyper-V uses), so here we need to divide PAGEs
+		 * into HV_HYP_PAGE in case that PAGE_SIZE > HV_HYP_PAGE_SIZE.
+		 * Besides, payload->range.offset should be the offset in one
+		 * HV_HYP_PAGE.
+		 */
 		payload->range.len = length;
-		payload->range.offset = sgl[0].offset;
+		payload->range.offset = offset_in_hvpg;
+		hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
 
 		cur_sgl = sgl;
-		for (i = 0; i < sg_count; i++) {
-			payload->range.pfn_array[i] =
-				page_to_pfn(sg_page((cur_sgl)));
-			cur_sgl = sg_next(cur_sgl);
-		}
+		for (i = 0; i < hvpg_count; i++) {
+			/*
+			 * 'i' is the index of hv pages in the payload and
+			 * 'hvpgoff' is the offset (in hv pages) of the first
+			 * hv page in the first page. The relationship between
+			 * the sum of 'i' and 'hvpgoff' and the offset (in hv
+			 * pages) in a payload page ('hvpgoff_in_page') is as
+			 * follows:
+			 *
+			 * |------------------ PAGE -------------------|
+			 * |   NR_HV_HYP_PAGES_IN_PAGE hvpgs in total  |
+			 * |hvpg|hvpg| ...              |hvpg| ...|hvpg|
+			 * ^         ^                 ^
+			 * +-hvpgoff-+                 +-- hvpgoff_in_page
+			 * ^                           |
+			 * +------------- i -----------+
+			 */
+			unsigned int hvpgoff_in_page =
+				(i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+
+			/*
+			 * Two cases that we need to fetch a page:
+			 * 1) i == 0, the first step, or
+			 * 2) hvpgoff_in_page == 0, when we reach the boundary
+			 *    of a page.
+			 */
+			if (hvpgoff_in_page == 0 || i == 0) {
+				hvpfn = page_to_hvpfn(sg_page(cur_sgl));
+				cur_sgl = sg_next(cur_sgl);
+			}
+
+			payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
+		}
 	}
 
 	cmd_request->payload = payload;
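To see the fetch cadence the comment describes, a short user-space sketch can print where the loop advances to the next scatterlist page. The 64K guest page size (so NR_HV_HYP_PAGES_IN_PAGE is 16) and all inputs below are assumptions for illustration:

```c
/* Illustrative sketch of the hvpgoff_in_page bookkeeping above,
 * assuming 64K guest pages and 4K Hyper-V pages. */
#include <stdio.h>

#define PAGE_SIZE               (64 * 1024U)
#define HV_HYP_PAGE_SIZE        4096U
#define HV_HYP_PAGE_SHIFT       12
#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)

int main(void)
{
	unsigned int sg_offset = 8192;                           /* assumed */
	unsigned int hvpgoff = sg_offset >> HV_HYP_PAGE_SHIFT;   /* 2 */
	unsigned int hvpg_count = 20;                            /* assumed */

	for (unsigned int i = 0; i < hvpg_count; i++) {
		unsigned int hvpgoff_in_page =
			(i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;

		if (hvpgoff_in_page == 0 || i == 0)
			printf("i=%2u: fetch next sg page\n", i);
		printf("i=%2u -> hv page %u within the guest page\n",
		       i, hvpgoff_in_page);
	}
	return 0;
}
```

With hvpgoff = 2, a fetch happens at i = 0 and again at i = 14: the first guest page contributes only its remaining 14 Hyper-V pages, while every later guest page contributes all 16.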
...
@@ -14,6 +14,7 @@
 #include <uapi/linux/hyperv.h>
 
+#include <linux/mm.h>
 #include <linux/types.h>
 #include <linux/scatterlist.h>
 #include <linux/list.h>
@@ -23,12 +24,55 @@
 #include <linux/mod_devicetable.h>
 #include <linux/interrupt.h>
 #include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
 
 #define MAX_PAGE_BUFFER_COUNT				32
 #define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
 
 #pragma pack(push, 1)
 
+/*
+ * Types for GPADL, decides how the GPADL header is created.
+ *
+ * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
+ * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
+ * into the gpadl, therefore the number of HV_HYP_PAGEs and the indexes of
+ * each HV_HYP_PAGE will differ between the types of GPADL, for example if
+ * PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva:    |--       64k      --|--       64k      --| ... |
+ * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
+ *         |    |    ...   |    |    |    ...   |   ...
+ *         v    V          V    V    V          V
+ * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
+ *
+ * RING:
+ *
+ *         | header  |           data           | header  |     data      |
+ * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
+ * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...   2n
+ *         |         /    /          /          |         /         /
+ *         |        /    /          /           |        /         /
+ *         |       /    /   ...    /    ...     |       /    ...  /
+ *         |      /    /          /             |      /         /
+ *         V    V    V          V               V    V         v
+ * gpadl:  | 4k | 4k |    ...   |    ...        | 4k | 4k |   ...   |
+ * index:  0    1    2   ...    16   ...       n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+	HV_GPADL_BUFFER,
+	HV_GPADL_RING
+};
+
 /* Single-page buffer */
 struct hv_page_buffer {
 	u32 len;
@@ -111,7 +155,7 @@ struct hv_ring_buffer {
 	} feature_bits;
 
 	/* Pad it to PAGE_SIZE so that data starts on page boundary */
-	u8	reserved2[4028];
+	u8	reserved2[PAGE_SIZE - 68];
 
 	/*
 	 * Ring data starts here + RingDataStartOffset
@@ -120,6 +164,10 @@ struct hv_ring_buffer {
 	u8 buffer[];
 } __packed;
 
+/* Calculate the proper size of a ringbuffer, it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+					       (payload_sz))
+
 struct hv_ring_buffer_info {
 	struct hv_ring_buffer *ring_buffer;
 	u32 ring_size;			/* Include the shared header */
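Because reserved2[] pads struct hv_ring_buffer out to exactly one guest page, VMBUS_RING_SIZE() evaluates to one header page plus the page-aligned payload. The user-space sketch below re-derives the macro under that assumption (illustrative only, not the kernel definition) and shows why the drivers above moved from a hard-coded 40K to VMBUS_RING_SIZE(36 * 1024):

```c
/* Sketch of what VMBUS_RING_SIZE() evaluates to, assuming the ring
 * buffer header is padded to exactly one guest page. Illustrative. */
#include <stdio.h>

/* round x up to a multiple of the (power-of-two) page size ps */
#define PAGE_ALIGN(x, ps) (((x) + (ps) - 1) & ~((unsigned long)(ps) - 1))

static unsigned long vmbus_ring_size(unsigned long payload, unsigned long page)
{
	/* one header page (by construction) + payload, rounded up */
	return PAGE_ALIGN(page + payload, page);
}

int main(void)
{
	printf("4K pages:  %lu\n", vmbus_ring_size(36 * 1024, 4096));   /* 40960  */
	printf("64K pages: %lu\n", vmbus_ring_size(36 * 1024, 65536)); /* 131072 */
	return 0;
}
```

On 4K pages the result is the same 40K the old constants hard-coded; on 64K pages it grows to 128K instead of silently violating the page-alignment requirement.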
@@ -1630,4 +1678,22 @@ struct hyperv_pci_block_ops {
 
 extern struct hyperv_pci_block_ops hvpci_block_ops;
 
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+	phys_addr_t paddr;
+
+	if (is_vmalloc_addr(addr))
+		paddr = page_to_phys(vmalloc_to_page(addr)) +
+			offset_in_page(addr);
+	else
+		paddr = __pa(addr);
+
+	return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
+
 #endif /* _HYPERV_H */
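A final aside: the helper macros above are pure arithmetic and can be checked standalone. The sketch assumes a 64K guest PAGE_SIZE and 4K Hyper-V pages; constants are redefined locally for illustration:

```c
/* Standalone check of the hvpage helper macros, with assumed constants. */
#include <stdio.h>

#define PAGE_SIZE               (64 * 1024UL)
#define HV_HYP_PAGE_SIZE        4096UL
#define HV_HYP_PAGE_SHIFT       12
#define HV_HYP_PAGE_MASK        (~(HV_HYP_PAGE_SIZE - 1))

#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)   ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)             (((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)

int main(void)
{
	printf("%lu\n", NR_HV_HYP_PAGES_IN_PAGE);    /* 16: hv pages per page */
	printf("%lu\n", offset_in_hvpage(0x12345));  /* 0x345: offset in 4K page */
	printf("%lu\n", HVPFN_UP(4097));             /* 2: 4097 bytes need 2 hv pages */
	return 0;
}
```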