Commit cea0aa9c authored by David S. Miller

Merge branch 's390-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2019-04-17

please apply some additional qeth patches to net-next. This patchset
converts the driver to use the kernel's multiqueue model.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a6f7892 54a50941
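For orientation before the diffs: under the kernel's multiqueue model, a driver allocates a netdev with several TX queues, lets the stack ask its .ndo_select_queue callback which queue each skb should use, and handles flow control per subqueue instead of per device. A rough sketch of that pattern (the foo_* names are hypothetical; the APIs shown, alloc_etherdev_mqs(), netif_set_real_num_tx_queues(), skb_get_hash(), free_netdev(), are real):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define FOO_TXQS 4

    /* Pick a TX queue for each skb; the stack stores the result in the
     * skb, and ndo_start_xmit reads it back via skb_get_queue_mapping().
     */
    static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev)
    {
        return skb_get_hash(skb) % dev->real_num_tx_queues;
    }

    static const struct net_device_ops foo_netdev_ops = {
        .ndo_select_queue = foo_select_queue,
    };

    static struct net_device *foo_alloc(void)
    {
        /* Allocate a netdev with FOO_TXQS TX queues and one RX queue. */
        struct net_device *dev = alloc_etherdev_mqs(0, FOO_TXQS, 1);

        if (!dev)
            return NULL;
        dev->netdev_ops = &foo_netdev_ops;
        /* Report how many of the allocated queues are currently usable. */
        if (netif_set_real_num_tx_queues(dev, FOO_TXQS) < 0) {
            free_netdev(dev);
            return NULL;
        }
        return dev;
    }

The hunks below wire this up for qeth: queue-mapping helpers and prototypes in the core header, a .ndo_select_queue callback per discipline, and per-queue statistics.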
@@ -219,6 +219,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 /* QDIO queue and buffer handling */
 /*****************************************************************************/
 #define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ 0
+#define QETH_IQD_MIN_UCAST_TXQ 1
 #define QETH_IN_BUF_SIZE_DEFAULT 65536
 #define QETH_IN_BUF_COUNT_DEFAULT 64
 #define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -464,7 +467,6 @@ struct qeth_card_stats {
     u64 rx_errors;
     u64 rx_dropped;
     u64 rx_multicast;
-    u64 tx_errors;
 };
 
 struct qeth_out_q_stats {
@@ -479,6 +481,7 @@ struct qeth_out_q_stats {
     u64 skbs_linearized_fail;
     u64 tso_bytes;
     u64 packing_mode_switch;
+    u64 stopped;
 
     /* rtnl_link_stats64 */
     u64 tx_packets;
@@ -509,6 +512,11 @@ struct qeth_qdio_out_q {
     atomic_t set_pci_flags_count;
 };
 
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+    return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
 struct qeth_qdio_info {
     atomic_t state;
     /* input */
@@ -836,6 +844,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
     return dev->netdev_ops != NULL;
 }
 
+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+    if (txq == QETH_IQD_MCAST_TXQ)
+        return dev->num_tx_queues - 1;
+    if (txq == dev->num_tx_queues - 1)
+        return QETH_IQD_MCAST_TXQ;
+    return txq;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
                                           unsigned int elements)
 {
@@ -931,18 +948,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
                                               data, QETH_PROT_IPV6);
 }
 
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-                            int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
-                                                        struct sk_buff *skb,
-                                                        int ipv, int cast_type)
-{
-    if (IS_IQD(card) && cast_type != RTN_UNICAST)
-        return card->qdio.out_qs[card->qdio.no_out_queues - 1];
-    if (!card->qdio.do_prio_queueing)
-        return card->qdio.out_qs[card->qdio.default_out_queue];
-    return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
 
 extern struct qeth_discipline qeth_l2_discipline;
 extern struct qeth_discipline qeth_l3_discipline;
@@ -988,7 +994,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
 int qeth_qdio_clear_card(struct qeth_card *, int);
 void qeth_clear_working_pool_list(struct qeth_card *);
 void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
 void qeth_setadp_promisc_mode(struct qeth_card *);
 int qeth_setadpparms_change_macaddr(struct qeth_card *);
 void qeth_tx_timeout(struct net_device *);
@@ -1023,6 +1029,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
                                       struct net_device *dev,
                                       netdev_features_t features);
 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                          u8 cast_type, struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
 int qeth_stop(struct net_device *dev);
 
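The new qeth_iqd_translate_txq() helper swaps netdev txq 0 (QETH_IQD_MCAST_TXQ, the queue the stack uses for multicast) with the last queue, because IQD hardware keeps its multicast queue at the end; the removed qeth_get_tx_queue() sent non-unicast traffic to out_qs[no_out_queues - 1] for the same reason. A standalone illustration of the mapping for a hypothetical 4-queue device (userspace C mirroring the helper, not driver code):

    #include <assert.h>
    #include <stdio.h>

    #define NUM_TX_QUEUES 4
    #define IQD_MCAST_TXQ 0

    static unsigned int translate_txq(unsigned int txq)
    {
        if (txq == IQD_MCAST_TXQ)
            return NUM_TX_QUEUES - 1;       /* mcast rides the last HW queue */
        if (txq == NUM_TX_QUEUES - 1)
            return IQD_MCAST_TXQ;           /* last netdev txq takes HW queue 0 */
        return txq;                         /* everything else maps 1:1 */
    }

    int main(void)
    {
        for (unsigned int txq = 0; txq < NUM_TX_QUEUES; txq++) {
            printf("netdev txq %u -> HW queue %u\n", txq, translate_txq(txq));
            /* The swap is its own inverse, so the same helper
             * translates in both directions.
             */
            assert(translate_txq(translate_txq(txq)) == txq);
        }
        return 0;
    }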
[This diff is collapsed.]
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
     if (!card)
         return -EINVAL;
 
+    if (IS_IQD(card))
+        return -EOPNOTSUPP;
+
     mutex_lock(&card->conf_mutex);
     if (card->state != CARD_STATE_DOWN) {
         rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
         card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
         card->qdio.default_out_queue = 2;
     } else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-        if (card->info.type == QETH_CARD_TYPE_IQD) {
-            rc = -EPERM;
-            goto out;
-        }
         card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
         card->qdio.default_out_queue = 3;
     } else if (sysfs_streq(buf, "no_prio_queueing")) {
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
     QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
     QETH_TXQ_STAT("TSO bytes", tso_bytes),
     QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+    QETH_TXQ_STAT("Queue stopped", stopped),
 };
 
 static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
          CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
 }
 
+static void qeth_get_channels(struct net_device *dev,
+                              struct ethtool_channels *channels)
+{
+    struct qeth_card *card = dev->ml_priv;
+
+    channels->max_rx = dev->num_rx_queues;
+    channels->max_tx = card->qdio.no_out_queues;
+    channels->max_other = 0;
+    channels->max_combined = 0;
+    channels->rx_count = dev->real_num_rx_queues;
+    channels->tx_count = dev->real_num_tx_queues;
+    channels->other_count = 0;
+    channels->combined_count = 0;
+}
+
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally. */
 /* Always advertise and support all speeds up to specified, and only one */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
     .get_ethtool_stats = qeth_get_ethtool_stats,
     .get_sset_count = qeth_get_sset_count,
     .get_drvinfo = qeth_get_drvinfo,
+    .get_channels = qeth_get_channels,
     .get_link_ksettings = qeth_get_link_ksettings,
 };
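The new .get_channels callback exposes these queue counts to userspace, e.g. via ethtool --show-channels <dev>. For illustration, the same numbers can be read programmatically through the ETHTOOL_GCHANNELS ioctl; a minimal sketch, where "eth0" is a placeholder interface name:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&ch;

        /* The kernel fills 'ch' from the driver's .get_channels callback. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("TX queues: %u of %u in use, RX queues: %u of %u\n",
                   ch.tx_count, ch.max_tx, ch.rx_count, ch.max_rx);
        close(fd);
        return 0;
    }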
@@ -161,10 +161,8 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
     }
 }
 
-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l2_get_cast_type(struct sk_buff *skb)
 {
-    if (card->info.type == QETH_CARD_TYPE_OSN)
-        return RTN_UNICAST;
     if (is_broadcast_ether_addr(skb->data))
         return RTN_BROADCAST;
     if (is_multicast_ether_addr(skb->data))
@@ -299,7 +297,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
     }
     if (card->state == CARD_STATE_HARDSETUP) {
         qeth_qdio_clear_card(card, 0);
-        qeth_clear_qdio_buffers(card);
+        qeth_drain_output_queues(card);
         qeth_clear_working_pool_list(card);
         card->state = CARD_STATE_DOWN;
     }
@@ -603,37 +601,44 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
     struct qeth_card *card = dev->ml_priv;
-    int cast_type = qeth_l2_get_cast_type(card, skb);
-    int ipv = qeth_get_ip_version(skb);
+    u16 txq = skb_get_queue_mapping(skb);
     struct qeth_qdio_out_q *queue;
     int tx_bytes = skb->len;
     int rc;
 
-    queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
-    netif_stop_queue(dev);
+    if (IS_IQD(card))
+        txq = qeth_iqd_translate_txq(dev, txq);
+    queue = card->qdio.out_qs[txq];
 
     if (IS_OSN(card))
         rc = qeth_l2_xmit_osn(card, skb, queue);
     else
-        rc = qeth_xmit(card, skb, queue, ipv, cast_type,
-                       qeth_l2_fill_header);
+        rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+                       qeth_l2_get_cast_type(skb), qeth_l2_fill_header);
 
     if (!rc) {
         QETH_TXQ_STAT_INC(queue, tx_packets);
         QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-        netif_wake_queue(dev);
         return NETDEV_TX_OK;
-    } else if (rc == -EBUSY) {
-        return NETDEV_TX_BUSY;
-    } /* else fall through */
+    }
 
     QETH_TXQ_STAT_INC(queue, tx_dropped);
     kfree_skb(skb);
-    netif_wake_queue(dev);
     return NETDEV_TX_OK;
 }
 
+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                struct net_device *sb_dev)
+{
+    struct qeth_card *card = dev->ml_priv;
+
+    if (IS_IQD(card))
+        return qeth_iqd_select_queue(dev, skb,
+                                     qeth_l2_get_cast_type(skb),
+                                     sb_dev);
+    return qeth_get_priority_queue(card, skb);
+}
+
 static const struct device_type qeth_l2_devtype = {
     .name = "qeth_layer2",
     .groups = qeth_l2_attr_groups,
@@ -687,6 +692,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
     .ndo_get_stats64 = qeth_get_stats64,
     .ndo_start_xmit = qeth_l2_hard_start_xmit,
     .ndo_features_check = qeth_features_check,
+    .ndo_select_queue = qeth_l2_select_queue,
     .ndo_validate_addr = qeth_l2_validate_addr,
     .ndo_set_rx_mode = qeth_l2_set_rx_mode,
     .ndo_do_ioctl = qeth_do_ioctl,
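Note that the global netif_stop_queue()/netif_wake_queue() calls disappear from the xmit path above. Under multiqueue, flow control becomes per-subqueue; qeth's version of this lives in the collapsed core diff, built on the new qeth_out_queue_is_full() helper and the "stopped" counter. A generic sketch of the pattern, with hypothetical foo_* names and stubbed ring accounting (not the driver's actual code):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static bool foo_tx_ring_full(struct net_device *dev, u16 txq)
    {
        /* Placeholder for real ring accounting, analogous to qeth's
         * qeth_out_queue_is_full() check on used_buffers.
         */
        return false;
    }

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        u16 txq = skb_get_queue_mapping(skb);

        /* ... hand the skb to HW queue 'txq' ... */

        if (foo_tx_ring_full(dev, txq))
            /* Only this subqueue stalls; the other TX queues keep flowing. */
            netif_stop_subqueue(dev, txq);
        return NETDEV_TX_OK;
    }

    /* Called from TX-completion handling once ring space is freed. */
    static void foo_tx_complete(struct net_device *dev, u16 txq)
    {
        if (__netif_subqueue_stopped(dev, txq))
            netif_wake_subqueue(dev, txq);
    }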
@@ -1433,7 +1433,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
     }
     if (card->state == CARD_STATE_HARDSETUP) {
         qeth_qdio_clear_card(card, 0);
-        qeth_clear_qdio_buffers(card);
+        qeth_drain_output_queues(card);
         qeth_clear_working_pool_list(card);
         card->state = CARD_STATE_DOWN;
     }
@@ -2036,7 +2036,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-    unsigned char eth_hdr[ETH_HLEN];
     unsigned int hw_hdr_len;
     int rc;
 
@@ -2046,45 +2045,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
     rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
     if (rc)
         return rc;
-    skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
     skb_pull(skb, ETH_HLEN);
 
     qeth_l3_fixup_headers(skb);
-    rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
-    if (rc == -EBUSY) {
-        /* roll back to ETH header */
-        skb_push(skb, ETH_HLEN);
-        skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
-    }
-    return rc;
+    return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
 }
 
 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
-    int cast_type = qeth_l3_get_cast_type(skb);
     struct qeth_card *card = dev->ml_priv;
+    u16 txq = skb_get_queue_mapping(skb);
     int ipv = qeth_get_ip_version(skb);
     struct qeth_qdio_out_q *queue;
     int tx_bytes = skb->len;
-    int rc;
-
-    queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+    int cast_type, rc;
 
     if (IS_IQD(card)) {
+        queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
         if (card->options.sniffer)
             goto tx_drop;
         if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
             (card->options.cq == QETH_CQ_ENABLED &&
              skb->protocol != htons(ETH_P_AF_IUCV)))
             goto tx_drop;
+
+        if (txq == QETH_IQD_MCAST_TXQ)
+            cast_type = qeth_l3_get_cast_type(skb);
+        else
+            cast_type = RTN_UNICAST;
+    } else {
+        queue = card->qdio.out_qs[txq];
+        cast_type = qeth_l3_get_cast_type(skb);
     }
 
     if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
         goto tx_drop;
 
-    netif_stop_queue(dev);
-
     if (ipv == 4 || IS_IQD(card))
         rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
     else
@@ -2094,16 +2092,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
     if (!rc) {
         QETH_TXQ_STAT_INC(queue, tx_packets);
         QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-        netif_wake_queue(dev);
         return NETDEV_TX_OK;
-    } else if (rc == -EBUSY) {
-        return NETDEV_TX_BUSY;
-    } /* else fall through */
+    }
 
 tx_drop:
     QETH_TXQ_STAT_INC(queue, tx_dropped);
     kfree_skb(skb);
-    netif_wake_queue(dev);
     return NETDEV_TX_OK;
 }
@@ -2147,11 +2141,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
     return qeth_features_check(skb, dev, features);
 }
 
+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                    struct net_device *sb_dev)
+{
+    return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+                                 sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                    struct net_device *sb_dev)
+{
+    struct qeth_card *card = dev->ml_priv;
+
+    return qeth_get_priority_queue(card, skb);
+}
+
 static const struct net_device_ops qeth_l3_netdev_ops = {
     .ndo_open = qeth_open,
     .ndo_stop = qeth_stop,
     .ndo_get_stats64 = qeth_get_stats64,
     .ndo_start_xmit = qeth_l3_hard_start_xmit,
+    .ndo_select_queue = qeth_l3_iqd_select_queue,
     .ndo_validate_addr = eth_validate_addr,
     .ndo_set_rx_mode = qeth_l3_set_rx_mode,
     .ndo_do_ioctl = qeth_do_ioctl,
@@ -2168,6 +2178,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
     .ndo_get_stats64 = qeth_get_stats64,
     .ndo_start_xmit = qeth_l3_hard_start_xmit,
     .ndo_features_check = qeth_l3_osa_features_check,
+    .ndo_select_queue = qeth_l3_osa_select_queue,
     .ndo_validate_addr = eth_validate_addr,
     .ndo_set_rx_mode = qeth_l3_set_rx_mode,
     .ndo_do_ioctl = qeth_do_ioctl,