Commit 37ac5db6 authored by David S. Miller

Merge branch 's390-net-next'

Julian Wiedmann says:

====================
s390/net: updates 2018-09-26

Please apply one more series of cleanups and small improvements for qeth
to net-next. Note that one patch needs to touch both af_iucv and qeth in
order to untangle their receive paths.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4b1bd697 91cc98f5
......@@ -582,7 +582,8 @@ struct qeth_cmd_buffer {
struct qeth_channel *channel;
unsigned char *data;
int rc;
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
};
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
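The reworked callback prototype passes the qeth_card alongside the channel and the command buffer, so completion handlers no longer have to derive the card from the channel. A minimal sketch of a handler matching the new signature (the handler name and its body are illustrative only, not taken from the series):

static void example_cmd_cb(struct qeth_card *card, struct qeth_channel *channel,
			   struct qeth_cmd_buffer *iob)
{
	/* the card is now handed in directly instead of being looked up
	 * from the channel inside every handler
	 */
	if (iob->rc)
		dev_warn(&card->gdev->dev, "cmd failed, rc=%d\n", iob->rc);
}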
......@@ -671,6 +672,12 @@ struct qeth_card_info {
__u32 hwtrap;
};
enum qeth_discipline_id {
QETH_DISCIPLINE_UNDETERMINED = -1,
QETH_DISCIPLINE_LAYER3 = 0,
QETH_DISCIPLINE_LAYER2 = 1,
};
struct qeth_card_options {
struct qeth_routing_info route4;
struct qeth_ipa_info ipa4;
......@@ -680,7 +687,7 @@ struct qeth_card_options {
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
int layer2;
enum qeth_discipline_id layer;
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
......@@ -690,6 +697,9 @@ struct qeth_card_options {
char hsuid[9];
};
#define IS_LAYER2(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER2)
#define IS_LAYER3(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER3)
/*
* thread bits for qeth_card thread masks
*/
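With the int layer2 flag replaced by the enum-typed options.layer, the new IS_LAYER2()/IS_LAYER3() helpers become the canonical way to test the active discipline, as the sysfs and xmit hunks below show. A short before/after sketch (do_layer2_setup()/do_layer3_setup() are hypothetical helpers used only for illustration):

/* old style: open-coded test against the raw int flag */
if (card->options.layer2)
	do_layer2_setup(card);

/* new style: explicit about which discipline is meant */
if (IS_LAYER2(card))
	do_layer2_setup(card);
else if (IS_LAYER3(card))
	do_layer3_setup(card);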
......@@ -702,12 +712,6 @@ struct qeth_osn_info {
int (*data_cb)(struct sk_buff *skb);
};
enum qeth_discipline_id {
QETH_DISCIPLINE_UNDETERMINED = -1,
QETH_DISCIPLINE_LAYER3 = 0,
QETH_DISCIPLINE_LAYER2 = 1,
};
struct qeth_discipline {
const struct device_type *devtype;
int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
......@@ -759,7 +763,6 @@ struct qeth_switch_info {
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
int lan_online;
spinlock_t lock;
struct ccwgroup_device *gdev;
struct qeth_channel read;
......
......@@ -31,10 +31,9 @@ static ssize_t qeth_dev_state_show(struct device *dev,
case CARD_STATE_SOFTSETUP:
return sprintf(buf, "SOFTSETUP\n");
case CARD_STATE_UP:
if (card->lan_online)
return sprintf(buf, "UP (LAN ONLINE)\n");
else
return sprintf(buf, "UP (LAN OFFLINE)\n");
return sprintf(buf, "UP (LAN %s)\n",
netif_carrier_ok(card->dev) ? "ONLINE" :
"OFFLINE");
case CARD_STATE_RECOVER:
return sprintf(buf, "RECOVER\n");
default:
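With the lan_online field removed from struct qeth_card, the carrier state maintained by the networking core becomes the single source of truth and is queried via netif_carrier_ok() as above. A minimal sketch of the corresponding producer side, assuming a link-event handler along these lines (the function name is hypothetical):

static void example_handle_link_event(struct qeth_card *card, bool link_up)
{
	/* keep the core's carrier state in sync with the LAN state reported
	 * by the device, instead of mirroring it in a private flag
	 */
	if (link_up)
		netif_carrier_on(card->dev);
	else
		netif_carrier_off(card->dev);
}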
......@@ -228,7 +227,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
if (!card->options.layer2) {
if (IS_LAYER3(card)) {
rc = -ENOTSUPP;
goto out;
}
......@@ -379,7 +378,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.layer2);
return sprintf(buf, "%i\n", card->options.layer);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
......@@ -413,7 +412,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
if (card->options.layer2 == newdis)
if (card->options.layer == newdis)
goto out;
if (card->info.layer_enforced) {
/* fixed layer, can't switch */
......@@ -432,8 +431,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
card->options.layer2 = -1;
free_netdev(card->dev);
card->dev = ndev;
}
......
......@@ -694,7 +694,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
......@@ -806,7 +806,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
INIT_LIST_HEAD(&card->vid_list);
hash_init(card->mac_htable);
card->options.layer2 = 1;
card->info.hwtrap = 0;
qeth_l2_vnicc_set_defaults(card);
return 0;
......@@ -998,10 +997,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
card->state = CARD_STATE_SOFTSETUP;
if (card->lan_online)
netif_carrier_on(card->dev);
else
netif_carrier_off(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
......@@ -1147,9 +1142,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
if (card->state == CARD_STATE_RECOVER) {
rc = __qeth_l2_set_online(card->gdev, 1);
if (rc) {
......@@ -1159,7 +1151,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
}
} else
rc = __qeth_l2_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
netif_device_attach(card->dev);
if (rc)
......
......@@ -1348,6 +1348,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int budget, int *done)
{
struct net_device *dev = card->dev;
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
......@@ -1369,11 +1370,10 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
magic = *(__u16 *)skb->data;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
(magic == ETH_P_AF_IUCV)) {
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
len = skb->len;
card->dev->header_ops->create(skb, card->dev, 0,
card->dev->dev_addr, "FAKELL", len);
skb_reset_mac_header(skb);
dev_hard_header(skb, dev, ETH_P_AF_IUCV,
dev->dev_addr, "FAKELL", len);
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
} else {
qeth_l3_rebuild_skb(card, skb, hdr);
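The IQD receive path for AF_IUCV traffic now builds the fake Ethernet header through the generic dev_hard_header() helper and lets eth_type_trans() fill in skb->protocol and skb->dev, rather than calling the device's header_ops directly and setting the protocol by hand. A hedged, self-contained sketch of that pattern (the helper name is hypothetical; it mirrors the hunk above and needs <linux/etherdevice.h> and <linux/if_ether.h>):

static void example_deliver_af_iucv(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len = skb->len;

	/* prepend a dummy L2 header so the stack sees a regular frame */
	dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, "FAKELL", len);
	/* sets skb->dev and skb->protocol and pulls the mac header */
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
}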
......@@ -2005,17 +2005,15 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
unsigned int data_len)
{
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.length = data_len;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
memcpy(&daddr[8], iucv_hdr->destUserID, 8);
memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8);
memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
}
......@@ -2235,7 +2233,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
if (card->state != CARD_STATE_UP || !card->lan_online) {
if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
......@@ -2489,7 +2487,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->ip_htable);
hash_init(card->ip_mc_htable);
card->options.layer2 = 0;
card->info.hwtrap = 0;
return 0;
}
......@@ -2576,10 +2573,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
if (card->lan_online)
netif_carrier_on(card->dev);
else
netif_carrier_off(card->dev);
qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
......@@ -2717,9 +2710,6 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
if (card->state == CARD_STATE_RECOVER) {
rc = __qeth_l3_set_online(card->gdev, 1);
if (rc) {
......@@ -2729,7 +2719,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
}
} else
rc = __qeth_l3_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
netif_device_attach(card->dev);
if (rc)
......
......@@ -80,6 +80,11 @@ struct af_iucv_trans_hdr {
u8 pad; /* total 104 bytes */
} __packed;
static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb)
{
return (struct af_iucv_trans_hdr *)skb_network_header(skb);
}
enum iucv_tx_notify {
/* transmission of skb is completed and was successful */
TX_NOTIFY_OK = 0,
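The new iucv_trans_hdr() accessor assumes that the network header has been set to the start of the AF_IUCV transport header (qeth arranges this via eth_type_trans() on receive, afiucv_hs_send() via skb_reset_network_header() on transmit) and replaces the scattered (struct af_iucv_trans_hdr *)skb->data casts further down. A minimal usage sketch (the function is hypothetical):

static u16 example_peer_window(struct sk_buff *skb)
{
	/* only valid once the network header points at the transport header */
	return iucv_trans_hdr(skb)->window;
}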
......
......@@ -320,13 +320,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *nskb;
int err, confirm_recv = 0;
memset(skb->head, 0, ETH_HLEN);
phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_mac_header(skb);
phs_hdr = skb_push(skb, sizeof(*phs_hdr));
memset(phs_hdr, 0, sizeof(*phs_hdr));
skb_reset_network_header(skb);
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
phs_hdr->magic = ETH_P_AF_IUCV;
phs_hdr->version = 1;
......@@ -350,6 +346,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb_push(skb, ETH_HLEN);
memset(skb->data, 0, ETH_HLEN);
skb->dev = iucv->hs_dev;
if (!skb->dev) {
err = -ENODEV;
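After this reordering the transmit path first pushes the transport header and pins the network header to it, and only then pushes the zeroed ETH_HLEN bytes that serve as a dummy mac header, so iucv_trans_hdr() finds the header via skb_network_header() on both the transmit and receive sides. A rough sketch of the resulting buffer layout, as built by the hunk above:

/*
 * skb->data after both skb_push() calls:
 *
 *   +----------------+---------------------------+---------+
 *   | ETH_HLEN zeros | struct af_iucv_trans_hdr  | payload |
 *   +----------------+---------------------------+---------+
 *   ^ skb->data        ^ skb_network_header(skb)
 */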
......@@ -1943,8 +1942,7 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr =
(struct af_iucv_trans_hdr *)skb->data;
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
char tmpID[8];
char tmpName[8];
......@@ -1967,13 +1965,12 @@ static void afiucv_swap_src_dest(struct sk_buff *skb)
**/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
struct sock *nsk;
struct iucv_sock *iucv, *niucv;
struct af_iucv_trans_hdr *trans_hdr;
int err;
iucv = iucv_sk(sk);
trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
if (!iucv) {
/* no sock - connection refused */
afiucv_swap_src_dest(skb);
......@@ -2034,15 +2031,13 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
struct af_iucv_trans_hdr *trans_hdr =
(struct af_iucv_trans_hdr *)skb->data;
if (!iucv)
goto out;
if (sk->sk_state != IUCV_BOUND)
goto out;
bh_lock_sock(sk);
iucv->msglimit_peer = trans_hdr->window;
iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
......@@ -2098,8 +2093,6 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
struct af_iucv_trans_hdr *trans_hdr =
(struct af_iucv_trans_hdr *)skb->data;
if (!iucv)
return NET_RX_SUCCESS;
......@@ -2107,7 +2100,7 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state != IUCV_CONNECTED)
return NET_RX_SUCCESS;
atomic_sub(trans_hdr->window, &iucv->msg_sent);
atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
iucv_sock_wake_msglim(sk);
return NET_RX_SUCCESS;
}
......@@ -2170,22 +2163,13 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
int err = NET_RX_SUCCESS;
char nullstring[8];
if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
(int)skb->len,
(int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
if (skb_linearize(skb)) {
WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
(int)skb->len);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
skb_pull(skb, ETH_HLEN);
trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
trans_hdr = iucv_trans_hdr(skb);
EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
......