Commit 60ecf84d authored by Arnaldo Carvalho de Melo

Merge remote-tracking branch 'torvalds/master' into perf/core

Sync with upstream to pick up recent developments.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parents 41d0914d 353f7988
@@ -421,6 +421,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
  * @work: the worker that implements software debouncing
  * @sw_debounced: flag indicating if the software debouncer is active
  * @level: the current debounced physical level of the line
+ * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+ * @raw_level: the line level at the time of event
+ * @total_discard_seq: the running counter of the discarded events
+ * @last_seqno: the last sequence number before debounce period expires
  */
 struct line {
         struct gpio_desc *desc;
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
         struct irdma_cm_node *cm_node;
         struct list_head teardown_list;
         struct ib_qp_attr attr;
-        struct irdma_sc_vsi *vsi = &iwdev->vsi;
-        struct irdma_sc_qp *sc_qp;
-        struct irdma_qp *qp;
-        int i;
 
         INIT_LIST_HEAD(&teardown_list);
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
                 irdma_cm_disconn(cm_node->iwqp);
                 irdma_rem_ref_cm_node(cm_node);
         }
-
-        if (!iwdev->roce_mode)
-                return;
-
-        INIT_LIST_HEAD(&teardown_list);
-        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-                mutex_lock(&vsi->qos[i].qos_mutex);
-                list_for_each_safe (list_node, list_core_temp,
-                                    &vsi->qos[i].qplist) {
-                        u32 qp_ip[4];
-
-                        sc_qp = container_of(list_node, struct irdma_sc_qp,
-                                             list);
-                        if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-                                continue;
-
-                        qp = sc_qp->qp_uk.back_qp;
-                        if (!disconnect_all) {
-                                if (nfo->ipv4)
-                                        qp_ip[0] = qp->udp_info.local_ipaddr[3];
-                                else
-                                        memcpy(qp_ip,
-                                               &qp->udp_info.local_ipaddr[0],
-                                               sizeof(qp_ip));
-                        }
-                        if (disconnect_all ||
-                            (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-                             !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-                                spin_lock(&iwdev->rf->qptable_lock);
-                                if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-                                        irdma_qp_add_ref(&qp->ibqp);
-                                        list_add(&qp->teardown_entry,
-                                                 &teardown_list);
-                                }
-                                spin_unlock(&iwdev->rf->qptable_lock);
-                        }
-                }
-                mutex_unlock(&vsi->qos[i].qos_mutex);
-        }
-
-        list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-                qp = container_of(list_node, struct irdma_qp, teardown_entry);
-                attr.qp_state = IB_QPS_ERR;
-                irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-                irdma_qp_rem_ref(&qp->ibqp);
-        }
 }
 
 /**
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
         dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
         dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
         dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+        dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
         dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
         dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
         dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
         dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
         dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
         dev->irq_ops = &icrdma_irq_ops;
+        dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
         dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
         dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
         dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
         u64 max_hw_outbound_msg_size;
         u64 max_hw_inbound_msg_size;
         u64 max_mr_size;
+        u64 page_size_cap;
         u32 min_hw_qp_id;
         u32 min_hw_aeq_size;
         u32 max_hw_aeq_size;
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
         props->vendor_part_id = pcidev->device;
         props->hw_ver = rf->pcidev->revision;
-        props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+        props->page_size_cap = hw_attrs->page_size_cap;
         props->max_mr_size = hw_attrs->max_mr_size;
         props->max_qp = rf->max_qp - rf->used_qps;
         props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
         if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
                 iwmr->page_size = ib_umem_find_best_pgsz(region,
-                                                         SZ_4K | SZ_2M | SZ_1G,
+                                                         iwdev->rf->sc_dev.hw_attrs.page_size_cap,
                                                          virt);
                 if (unlikely(!iwmr->page_size)) {
                         kfree(iwmr);
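Taken together, the irdma hunks above stop hard-coding the supported memory-registration page sizes and publish a per-generation page_size_cap instead: i40iw_init_hw() (x722) advertises 4K and 2M, icrdma_init_hw() (gen-2) also advertises 1G, and both irdma_query_device() and the ib_umem_find_best_pgsz() call in irdma_reg_user_mr() now consume that mask. As a rough, standalone illustration of what such a capability mask buys (a hypothetical helper, not driver code, and deliberately simpler than ib_umem_find_best_pgsz(), which also inspects the umem scatter list), the sketch below picks the largest advertised page size whose alignment the buffer satisfies:

#include <stdint.h>
#include <stdio.h>

/* Same values as the kernel's SZ_* constants. */
#define SZ_4K 0x00001000UL
#define SZ_2M 0x00200000UL
#define SZ_1G 0x40000000UL

/*
 * Hypothetical helper: return the largest page size present in pgsz_cap
 * for which the buffer's IOVA and length are both aligned, or 0 if none
 * fits.  A real driver defers this decision to ib_umem_find_best_pgsz().
 */
static unsigned long best_page_size(unsigned long pgsz_cap,
                                    uint64_t iova, uint64_t len)
{
        unsigned long pgsz;

        for (pgsz = SZ_1G; pgsz >= SZ_4K; pgsz >>= 1) {
                if (!(pgsz_cap & pgsz))
                        continue;       /* not supported by this HW generation */
                if ((iova | len) & (pgsz - 1))
                        continue;       /* buffer not aligned to this page size */
                return pgsz;
        }
        return 0;
}

int main(void)
{
        /* x722-style cap settles on 2M even for a 1G-aligned, 1G-sized region. */
        printf("%#lx\n", best_page_size(SZ_4K | SZ_2M, 0x40000000, 0x40000000));
        /* gen-2-style cap can use a 1G page for the same region. */
        printf("%#lx\n", best_page_size(SZ_4K | SZ_2M | SZ_1G, 0x40000000, 0x40000000));
        return 0;
}

With the 4K|2M mask the first call returns a 2M page even though the region is 1G-aligned, which is the behaviour the i40iw hunk encodes; the 4K|2M|1G mask lets the second call pick a 1G page.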
@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
 #define WATCH_QUEUE_NOTE_SIZE 128
 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
 
+/*
+ * This must be called under the RCU read-lock, which makes
+ * sure that the wqueue still exists. It can then take the lock,
+ * and check that the wqueue hasn't been destroyed, which in
+ * turn makes sure that the notification pipe still exists.
+ */
+static inline bool lock_wqueue(struct watch_queue *wqueue)
+{
+        spin_lock_bh(&wqueue->lock);
+        if (unlikely(wqueue->defunct)) {
+                spin_unlock_bh(&wqueue->lock);
+                return false;
+        }
+        return true;
+}
+
+static inline void unlock_wqueue(struct watch_queue *wqueue)
+{
+        spin_unlock_bh(&wqueue->lock);
+}
+
 static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                          struct pipe_buffer *buf)
 {
@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
 
 /*
  * Post a notification to a watch queue.
+ *
+ * Must be called with the RCU lock for reading, and the
+ * watch_queue lock held, which guarantees that the pipe
+ * hasn't been released.
  */
 static bool post_one_notification(struct watch_queue *wqueue,
                                   struct watch_notification *n)
@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
         spin_lock_irq(&pipe->rd_wait.lock);
 
-        if (wqueue->defunct)
-                goto out;
-
         mask = pipe->ring_size - 1;
         head = pipe->head;
         tail = pipe->tail;
@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist,
                 if (security_post_notification(watch->cred, cred, n) < 0)
                         continue;
 
-                post_one_notification(wqueue, n);
+                if (lock_wqueue(wqueue)) {
+                        post_one_notification(wqueue, n);
+                        unlock_wqueue(wqueue);
+                }
         }
 
         rcu_read_unlock();
@@ -462,11 +487,12 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
                 return -EAGAIN;
         }
 
-        spin_lock_bh(&wqueue->lock);
-        kref_get(&wqueue->usage);
-        kref_get(&watch->usage);
-        hlist_add_head(&watch->queue_node, &wqueue->watches);
-        spin_unlock_bh(&wqueue->lock);
+        if (lock_wqueue(wqueue)) {
+                kref_get(&wqueue->usage);
+                kref_get(&watch->usage);
+                hlist_add_head(&watch->queue_node, &wqueue->watches);
+                unlock_wqueue(wqueue);
+        }
 
         hlist_add_head(&watch->list_node, &wlist->watchers);
         return 0;
@@ -520,20 +546,15 @@ int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
         wqueue = rcu_dereference(watch->queue);
 
-        /* We don't need the watch list lock for the next bit as RCU is
-         * protecting *wqueue from deallocation.
-         */
-        if (wqueue) {
+        if (lock_wqueue(wqueue)) {
                 post_one_notification(wqueue, &n.watch);
 
-                spin_lock_bh(&wqueue->lock);
-
                 if (!hlist_unhashed(&watch->queue_node)) {
                         hlist_del_init_rcu(&watch->queue_node);
                         put_watch(watch);
                 }
 
-                spin_unlock_bh(&wqueue->lock);
+                unlock_wqueue(wqueue);
         }
 
         if (wlist->release_watch) {
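Taken together, the watch_queue hunks funnel every "is this queue still live?" test through lock_wqueue()/unlock_wqueue(), so wqueue->defunct is only ever examined with wqueue->lock held and nothing is queued onto a pipe that is being torn down. A minimal sketch of the calling pattern the helpers establish, mirroring the __post_watch_notification() hunk (the wrapper name notify_one_watch() is illustrative, not kernel code):

/* Illustrative wrapper only: the caller shape after this change. */
static void notify_one_watch(struct watch *watch, struct watch_notification *n)
{
        struct watch_queue *wqueue;

        rcu_read_lock();                        /* keeps *wqueue from being freed */
        wqueue = rcu_dereference(watch->queue);
        if (lock_wqueue(wqueue)) {              /* takes wqueue->lock, fails if defunct */
                post_one_notification(wqueue, n);
                unlock_wqueue(wqueue);          /* drops wqueue->lock */
        }
        rcu_read_unlock();
}

In particular, add_watch_to_object() previously linked a watch onto the queue without checking defunct at all; making that step conditional on lock_wqueue() is what the corresponding hunk fixes.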
@@ -2247,6 +2247,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
         if (id >= READING_MAX_ID)
                 return false;
 
+        if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
+            && security_locked_down(LOCKDOWN_KEXEC))
+                return false;
+
         func = read_idmap[id] ?: FILE_CHECK;
 
         rcu_read_lock();
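For context on why ima_appraise_signature() now answers "no" for a kexec image when IMA appraisal is not in enforce mode: the kexec_file_load() signature check only waives the lockdown restriction on unverified kernels when IMA guarantees it will appraise the image itself. Roughly, paraphrased from memory of the mainline kernel/kexec_file.c logic (condensed, not verbatim):

/*
 * Condensed paraphrase (from memory, not verbatim) of the fallback in
 * kimage_validate_signature(): when the image signature could not be
 * verified and signatures are not strictly enforced, an unsigned image
 * is still rejected under lockdown unless IMA promises to appraise it.
 */
if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
    security_locked_down(LOCKDOWN_KEXEC))
        return -EPERM;  /* locked down and IMA will not appraise: refuse kexec */

With the hunk above, a non-enforcing IMA policy no longer counts as such a guarantee, so the lockdown check takes effect and the unsigned image is refused.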