Commit a91eba5b authored Sep 25, 2009 by David S. Miller
Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-davem
parents b8273570 0aea51c3
Showing 1 changed file with 88 additions and 141 deletions
drivers/net/virtio_net.c  +88  -141
-/* A simple network driver using virtio.
+/* A network driver using virtio.
  *
  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
  *
@@ -48,19 +48,9 @@ struct virtnet_info
 	struct napi_struct napi;
 	unsigned int status;
 
-	/* The skb we couldn't send because buffers were full. */
-	struct sk_buff *last_xmit_skb;
-
-	/* If we need to free in a timer, this is it. */
-	struct timer_list xmit_free_timer;
-
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
-	/* For cleaning up after transmission. */
-	struct tasklet_struct tasklet;
-	bool free_in_tasklet;
-
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -78,9 +68,17 @@ struct virtnet_info
 	struct page *pages;
 };
 
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+	union {
+		struct virtio_net_hdr hdr;
+		struct virtio_net_hdr_mrg_rxbuf mhdr;
+	};
+	unsigned int num_sg;
+};
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct virtio_net_hdr *)skb->cb;
+	return (struct skb_vnet_hdr *)skb->cb;
 }
 
 static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@ static void skb_xmit_done(struct virtqueue *svq)
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
-
-	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
-	 * queued, start_xmit won't be called. */
-	tasklet_schedule(&vi->tasklet);
 }
 
 static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	int err;
 	int i;
 
@@ -140,7 +134,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	if (vi->mergeable_rx_bufs) {
-		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
 		unsigned int copy;
 		char *p = page_address(skb_shinfo(skb)->frags[0].page);
 
@@ -148,8 +141,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			len = PAGE_SIZE;
 		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
 
-		memcpy(hdr, p, sizeof(*mhdr));
-		p += sizeof(*mhdr);
+		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+		p += sizeof(hdr->mhdr);
 
 		copy = len;
 		if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			skb_shinfo(skb)->nr_frags--;
 		} else {
 			skb_shinfo(skb)->frags[0].page_offset +=
-				sizeof(*mhdr) + copy;
+				sizeof(hdr->mhdr) + copy;
 			skb_shinfo(skb)->frags[0].size = len;
 			skb->data_len += len;
 			skb->len += len;
 		}
 
-		while (--mhdr->num_buffers) {
+		while (--hdr->mhdr.num_buffers) {
 			struct sk_buff *nskb;
 
 			i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
 			if (!nskb) {
 				pr_debug("%s: rx error: %d buffers missing\n",
-					 dev->name, mhdr->num_buffers);
+					 dev->name, hdr->mhdr.num_buffers);
 				dev->stats.rx_length_errors++;
 				goto drop;
 			}
@@ -205,7 +198,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			skb->len += len;
 		}
 	} else {
-		len -= sizeof(struct virtio_net_hdr);
+		len -= sizeof(hdr->hdr);
 
 		if (len <= MAX_PACKET_LEN)
 			trim_pages(vi, skb);
@@ -223,9 +216,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
-	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
-		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
+		if (!skb_partial_csum_set(skb,
+					  hdr->hdr.csum_start,
+					  hdr->hdr.csum_offset))
 			goto frame_err;
 	}
 
@@ -233,9 +228,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
-	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		pr_debug("GSO!\n");
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 			break;
@@ -248,14 +243,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       dev->name, hdr->hdr.gso_type);
 			goto frame_err;
 		}
 
-		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->gso_size;
+		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
-	for (;;) {
-		struct virtio_net_hdr *hdr;
+	do {
+		struct skb_vnet_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
@@ -298,7 +293,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 			break;
 		}
 		vi->num++;
-	}
+	} while (err >= num);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	if (!vi->mergeable_rx_bufs)
 		return try_fill_recv_maxbufs(vi, gfp);
 
-	for (;;) {
+	do {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 			break;
 		}
 		vi->num++;
-	}
+	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	return received;
 }
 
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	unsigned int len;
+	unsigned int len, tot_sgs = 0;
 
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
+		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		kfree_skb(skb);
 	}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock(vi->dev);
-
-	free_old_xmit_skbs(vi);
-
-	if (!skb_queue_empty(&vi->send))
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	netif_tx_unlock(vi->dev);
+	return tot_sgs;
 }
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	int num, err;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->csum_start = skb->csum_start - skb_headroom(skb);
-		hdr->csum_offset = skb->csum_offset;
+		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+		hdr->hdr.csum_offset = skb->csum_offset;
 	} else {
-		hdr->flags = 0;
-		hdr->csum_offset = hdr->csum_start = 0;
+		hdr->hdr.flags = 0;
+		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr_len = skb_headlen(skb);
-		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = skb_headlen(skb);
+		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
 		else
 			BUG();
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
-			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 	} else {
-		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-		hdr->gso_size = hdr->hdr_len = 0;
+		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
 	}
 
-	mhdr->num_buffers = 0;
+	hdr->mhdr.num_buffers = 0;
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(sg, mhdr, sizeof(*mhdr));
+		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
 	else
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
-	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err >= 0 && !vi->free_in_tasklet)
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock_bh(vi->dev);
-	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
-		vi->svq->vq_ops->kick(vi->svq);
-		vi->last_xmit_skb = NULL;
-	}
-	if (vi->free_in_tasklet)
-		free_old_xmit_skbs(vi);
-	netif_tx_unlock_bh(vi->dev);
+	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	int capacity;
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
-	/* If we has a buffer left over from last time, send it now. */
-	if (unlikely(vi->last_xmit_skb) &&
-	    xmit_skb(vi, vi->last_xmit_skb) < 0)
-		goto stop_queue;
-
-	vi->last_xmit_skb = NULL;
-
 	/* Put new one in send queue and do transmit */
-	if (likely(skb)) {
-		__skb_queue_head(&vi->send, skb);
-		if (xmit_skb(vi, skb) < 0) {
-			vi->last_xmit_skb = skb;
-			skb = NULL;
-			goto stop_queue;
+	__skb_queue_head(&vi->send, skb);
+	capacity = xmit_skb(vi, skb);
+
+	/* This can happen with OOM and indirect buffers. */
+	if (unlikely(capacity < 0)) {
+		netif_stop_queue(dev);
+		dev_warn(&dev->dev, "Unexpected full queue\n");
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			vi->svq->vq_ops->disable_cb(vi->svq);
+			netif_start_queue(dev);
+			goto again;
 		}
+		return NETDEV_TX_BUSY;
 	}
-done:
-	vi->svq->vq_ops->kick(vi->svq);
-	return NETDEV_TX_OK;
-
-stop_queue:
-	pr_debug("%s: virtio not prepared to send\n", dev->name);
-	netif_stop_queue(dev);
-
-	/* Activate callback for using skbs: if this returns false it
-	 * means some were used in the meantime. */
-	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-		vi->svq->vq_ops->disable_cb(vi->svq);
-		netif_start_queue(dev);
-		goto again;
-	}
-	if (skb) {
-		/* Drop this skb: we only queue one. */
-		vi->dev->stats.tx_dropped++;
-		kfree_skb(skb);
+	vi->svq->vq_ops->kick(vi->svq);
+
+	/* Don't wait up for transmitted skbs to be freed. */
+	skb_orphan(skb);
+	nf_reset(skb);
+
+	/* Apparently nice girls don't return TX_BUSY; stop the queue
+	 * before it gets out of hand.  Naturally, this wastes entries. */
+	if (capacity < 2+MAX_SKB_FRAGS) {
+		netif_stop_queue(dev);
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			/* More just got used, free them then recheck. */
+			capacity += free_old_xmit_skbs(vi);
+			if (capacity >= 2+MAX_SKB_FRAGS) {
+				netif_start_queue(dev);
+				vi->svq->vq_ops->disable_cb(vi->svq);
+			}
+		}
 	}
-	goto done;
+
+	return NETDEV_TX_OK;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->pages = NULL;
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
-	/* If they give us a callback when all buffers are done, we don't need
-	 * the timer. */
-	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
 	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	skb_queue_head_init(&vi->recv);
 	skb_queue_head_init(&vi->send);
 
-	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
-	if (!vi->free_in_tasklet)
-		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -1005,9 +956,6 @@ static void virtnet_remove(struct virtio_device *vdev)
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	if (!vi->free_in_tasklet)
-		del_timer_sync(&vi->xmit_free_timer);
-
 	/* Free our skbs in send and recv queues, if any. */
 	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
 		kfree_skb(skb);
@@ -1041,7 +989,6 @@ static unsigned int features[] = {
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {
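The heart of this merge is visible in the @@ -78,9 +68,17 @@ hunk: rather than casting skb->cb straight to struct virtio_net_hdr, the driver now formalizes a struct skb_vnet_hdr (a union of the plain and mergeable-rx-buffer header layouts plus a num_sg count) that rides along in the skb's control buffer, and free_old_xmit_skbs() reports how many scatterlist entries it reclaimed so start_xmit() can stop the queue while fewer than 2+MAX_SKB_FRAGS slots remain. The sketch below is not part of the commit; it is a standalone illustration of the header-in-cb pattern, with simplified fake_* stand-ins invented for the kernel types, and it builds with GCC (it relies on anonymous unions, a GNU C/C11 feature).

/* Standalone illustration only: simplified stand-ins, not the kernel's types. */
#include <stdio.h>

struct fake_virtio_net_hdr {            /* stand-in for struct virtio_net_hdr */
        unsigned char flags;
        unsigned char gso_type;
        unsigned short hdr_len;
};

struct fake_virtio_net_hdr_mrg_rxbuf {  /* stand-in for the mergeable-rx variant */
        struct fake_virtio_net_hdr hdr;
        unsigned short num_buffers;
};

struct fake_sk_buff {
        char cb[48];                    /* per-skb scratch area, as in struct sk_buff */
        /* ... the real sk_buff fields are omitted ... */
};

/* Mirrors struct skb_vnet_hdr from the diff: one union covering both header
 * layouts, plus the scatterlist entry count recorded at transmit time. */
struct fake_skb_vnet_hdr {
        union {
                struct fake_virtio_net_hdr hdr;
                struct fake_virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

static struct fake_skb_vnet_hdr *fake_skb_vnet_hdr(struct fake_sk_buff *skb)
{
        return (struct fake_skb_vnet_hdr *)skb->cb;   /* same cast the driver uses */
}

int main(void)
{
        struct fake_sk_buff skb = { { 0 } };
        struct fake_skb_vnet_hdr *hdr = fake_skb_vnet_hdr(&skb);

        hdr->hdr.flags = 1;     /* e.g. "needs checksum" */
        hdr->num_sg = 3;        /* ring entries this skb consumed */

        /* The whole union must fit inside cb[]; the driver relies on the same bound. */
        printf("header fits in cb: %s (num_sg=%u)\n",
               sizeof(*hdr) <= sizeof(skb.cb) ? "yes" : "no", hdr->num_sg);
        return 0;
}

With the virtio header and num_sg carried inside the skb itself, the transmit path no longer needs the last_xmit_skb pointer, the xmit_free timer, or the tasklet that the earlier hunks delete.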