Commit d1038084 authored by Al Viro

vmci: the same on the send side...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 53f58d8e
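
In outline: the send path previously threaded a memcpy-style callback (plus an explicit source offset) through qp_enqueue_locked() so the same code could serve both flat kernel buffers and msghdr-backed sends. After this change there is a single iov_iter-based helper; vmci_qpair_enqueue() wraps its flat buffer in a one-element kvec iterator, and vmci_qpair_enquev() hands over msg->msg_iter directly. The sketch below is illustrative only, not part of the commit: fill_ring(), send_buf() and send_msg() are hypothetical names, and kernel context plus the iov_iter_kvec() signature of this kernel generation are assumed.

    /*
     * Illustrative sketch (assumed kernel context); fill_ring(), send_buf()
     * and send_msg() are made-up names standing in for the VMCI enqueue path.
     */
    #include <linux/uio.h>
    #include <linux/socket.h>
    #include <linux/errno.h>

    /* One copy helper now serves both flat buffers and msghdr senders. */
    static int fill_ring(u8 *ring_va, size_t len, struct iov_iter *from)
    {
            /*
             * copy_from_iter_full() either copies all len bytes and advances
             * the iterator, or copies nothing (the iterator is reverted)
             * and returns false.
             */
            if (!copy_from_iter_full(ring_va, len, from))
                    return -EFAULT;
            return 0;
    }

    /* Flat-buffer caller: wrap the buffer in a one-element kvec iterator. */
    static int send_buf(u8 *ring_va, const void *buf, size_t len)
    {
            struct iov_iter from;
            struct kvec v = { .iov_base = (void *)buf, .iov_len = len };

            iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, len);
            return fill_ring(ring_va, len, &from);
    }

    /* msghdr caller: the socket layer has already built msg->msg_iter. */
    static int send_msg(u8 *ring_va, struct msghdr *msg)
    {
            return fill_ring(ring_va, msg_data_left(msg), &msg->msg_iter);
    }
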
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,20 +129,6 @@
  * *_MEM state, and vice versa.
  */
 
-/*
- * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
- * types are passed around to enqueue and dequeue routines.  Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side.  In
- * ESX, that parameter holds a buffer type.
- */
-typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
-                                      u64 queue_offset, const void *src,
-                                      size_t src_offset, size_t size);
-
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
         struct mutex __mutex;   /* Protects the queue. */
@@ -348,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
-static int __qp_memcpy_to_queue(struct vmci_queue *queue,
-                                u64 queue_offset,
-                                const void *src,
-                                size_t size,
-                                bool is_iovec)
+static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
+                                   u64 queue_offset,
+                                   struct iov_iter *from,
+                                   size_t size)
 {
         struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
         size_t bytes_copied = 0;
@@ -377,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 else
                         to_copy = size - bytes_copied;
 
-                if (is_iovec) {
-                        struct msghdr *msg = (struct msghdr *)src;
-                        int err;
-
-                        /* The iovec will track bytes_copied internally. */
-                        err = memcpy_from_msg((u8 *)va + page_offset,
-                                              msg, to_copy);
-                        if (err != 0) {
-                                if (kernel_if->host)
-                                        kunmap(kernel_if->u.h.page[page_index]);
-                                return VMCI_ERROR_INVALID_ARGS;
-                        }
-                } else {
-                        memcpy((u8 *)va + page_offset,
-                               (u8 *)src + bytes_copied, to_copy);
-                }
-
+                if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
+                                         from)) {
+                        if (kernel_if->host)
+                                kunmap(kernel_if->u.h.page[page_index]);
+                        return VMCI_ERROR_INVALID_ARGS;
+                }
                 bytes_copied += to_copy;
                 if (kernel_if->host)
                         kunmap(kernel_if->u.h.page[page_index]);
@@ -554,30 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
         return VMCI_SUCCESS;
 }
 
-static int qp_memcpy_to_queue(struct vmci_queue *queue,
-                              u64 queue_offset,
-                              const void *src, size_t src_offset, size_t size)
-{
-        return __qp_memcpy_to_queue(queue, queue_offset,
-                                    (u8 *)src + src_offset, size, false);
-}
-
-/*
- * Copies from a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
-                                  u64 queue_offset,
-                                  const void *msg,
-                                  size_t src_offset, size_t size)
-{
-
-        /*
-         * We ignore src_offset because src is really a struct iovec * and will
-         * maintain offset internally.
-         */
-        return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
-}
-
 /*
  * Allocates kernel VA space of specified size plus space for the queue
  * and kernel interface.  This is different from the guest queue allocator,
@@ -2590,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
                                  struct vmci_queue *consume_q,
                                  const u64 produce_q_size,
-                                 const void *buf,
-                                 size_t buf_size,
-                                 vmci_memcpy_to_queue_func memcpy_to_queue)
+                                 struct iov_iter *from)
 {
         s64 free_space;
         u64 tail;
+        size_t buf_size = iov_iter_count(from);
         size_t written;
         ssize_t result;
 
@@ -2615,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
         written = (size_t) (free_space > buf_size ? buf_size : free_space);
         tail = vmci_q_header_producer_tail(produce_q->q_header);
         if (likely(tail + written < produce_q_size)) {
-                result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+                result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
         } else {
                 /* Tail pointer wraps around. */
 
                 const size_t tmp = (size_t) (produce_q_size - tail);
 
-                result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+                result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
                 if (result >= VMCI_SUCCESS)
-                        result = memcpy_to_queue(produce_q, 0, buf, tmp,
-                                                 written - tmp);
+                        result = qp_memcpy_to_queue_iter(produce_q, 0, from,
+                                                         written - tmp);
         }
 
@@ -3078,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
                            int buf_type)
 {
         ssize_t result;
+        struct iov_iter from;
+        struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
 
         if (!qpair || !buf)
                 return VMCI_ERROR_INVALID_ARGS;
 
+        iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+
         qp_lock(qpair);
 
         do {
                 result = qp_enqueue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->produce_q_size,
-                                           buf, buf_size,
-                                           qp_memcpy_to_queue);
+                                           &from);
 
                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
@@ -3219,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                 result = qp_enqueue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->produce_q_size,
-                                           msg, msg_data_left(msg),
-                                           qp_memcpy_to_queue_iov);
+                                           &msg->msg_iter);
 
                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
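
One detail worth calling out: in qp_enqueue_locked(), the wrap-around path no longer threads a source offset into the second copy, because an iov_iter carries its position across calls; the second qp_memcpy_to_queue_iter() call simply continues where the first stopped. A minimal sketch of that property, with a hypothetical ring_write() and assumed kernel context:

    #include <linux/kernel.h>
    #include <linux/uio.h>
    #include <linux/errno.h>

    /* Hypothetical analogue of the wrap-around logic in qp_enqueue_locked():
     * enqueue 'written' bytes at position 'tail' of a ring of 'size' bytes. */
    static int ring_write(u8 *ring, u64 size, u64 tail, size_t written,
                          struct iov_iter *from)
    {
            if (likely(tail + written < size))
                    return copy_from_iter_full(ring + tail, written, from) ?
                           0 : -EFAULT;

            /*
             * Wrap: copy up to the end of the ring, then continue at the
             * start.  No explicit source offset is needed for the second
             * copy; 'from' already advanced past the first chunk.
             */
            if (!copy_from_iter_full(ring + tail, (size_t)(size - tail), from))
                    return -EFAULT;
            if (!copy_from_iter_full(ring, written - (size_t)(size - tail),
                                     from))
                    return -EFAULT;
            return 0;
    }
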