// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_IWARP_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	return -EIO;
}

int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	return -EIO;
}
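
/* Both translators above deliberately fall through to -EIO for any
 * status code they do not recognize, so callers always receive a
 * standard errno value.
 */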

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}
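
/* Illustrative use (the mutex shown is an example only):
 *
 *	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *		return;
 *
 * The helper polls mutex_trylock() every 10 ms and gives up once the
 * timeout elapses rather than blocking indefinitely.
 */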

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}
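
/* Note the off-by-one above: MSI-X vector 0 is reserved for the admin
 * queue, so queue vector i is controlled by bit (i - 1) of @mask.
 */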

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
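
/* In both mapping helpers above, the ITR value is shifted right by one
 * before being written because the VFINT_ITRN1 registers are programmed
 * in 2-usec units.
 */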

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
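
/* Example of the round-robin above: with eight queue pairs and four
 * queue vectors, vector 0 services queues 0 and 4, vector 1 services
 * queues 1 and 5, and so on.
 */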

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
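
/* Rx buffer sizing summary for 4K-page systems: build_skb mode uses
 * 3072-byte buffers so jumbo frames fit in an order-1 page, dropping to
 * 1536 - NET_IP_ALIGN bytes when the MTU is standard Ethernet; legacy
 * Rx always stays at 2048 bytes.
 */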

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
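
/* Note that iavf_add_vlan() does not talk to the PF directly: it only
 * queues the request by setting IAVF_FLAG_AQ_ADD_VLAN_FILTER, and the
 * watchdog task later sends the corresponding virtchnl message.
 */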

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters - restore existing non-MAC filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));

	for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
		bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
	 * never been a limit on the VF driver side
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	if (proto == cpu_to_be16(ETH_P_8021Q))
		clear_bit(vid, adapter->vsi.active_cvlans);
	else
		clear_bit(vid, adapter->vsi.active_svlans);

	return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
int iavf_replace_primary_mac(struct iavf_adapter *adapter,
			     const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->is_primary = false;
	}

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, new_mac);

	if (f) {
		/* Always send the request to add if changing primary MAC
		 * even if filter is already present on the list
		 */
		f->is_primary = true;
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
		ether_addr_copy(hw->mac.addr, new_mac);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	if (f) {
		queue_work(iavf_wq, &adapter->watchdog_task.work);
		return 0;
	}
	return -ENOMEM;
}

/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Returns true if the PF has already handled the set MAC request, false
 * otherwise
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
				    const u8 *macaddr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;
	bool ret = false;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, macaddr);

	if (!f || (!f->add && f->add_handled))
		ret = true;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	return ret;
}

/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = iavf_replace_primary_mac(adapter, addr->sa_data);

	if (ret)
		return ret;

	/* If this is an initial set MAC during VF spawn, do not wait */
	if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
		adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
		return 0;
	}

	if (handle_mac)
		goto done;

	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false,
					       msecs_to_jiffies(2500));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout.
	 * else it means we got response for set MAC from PF,
	 * check if netdev MAC was updated to requested MAC,
	 * if yes then set MAC succeeded otherwise it failed return -EACCES
	 */
	if (ret < 0)
		return ret;

	if (!ret)
		return -EAGAIN;

done:
	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}
/**
1169
 * iavf_set_rx_mode - NDO callback to set the netdev filters
1170 1171
 * @netdev: network interface device structure
 **/
1172
static void iavf_set_rx_mode(struct net_device *netdev)
1173
{
1174
	struct iavf_adapter *adapter = netdev_priv(netdev);
1175 1176

	spin_lock_bh(&adapter->mac_vlan_list_lock);
1177 1178
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1179
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1180 1181

	if (netdev->flags & IFF_PROMISC &&
1182 1183
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1184
	else if (!(netdev->flags & IFF_PROMISC) &&
1185 1186
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1187

1188
	if (netdev->flags & IFF_ALLMULTI &&
1189 1190
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1191
	else if (!(netdev->flags & IFF_ALLMULTI) &&
1192 1193
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
Greg Rose's avatar
Greg Rose committed
1194 1195 1196
}

/**
1197
 * iavf_napi_enable_all - enable NAPI on all queue vectors
Greg Rose's avatar
Greg Rose committed
1198 1199
 * @adapter: board private structure
 **/
1200
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1201 1202
{
	int q_idx;
1203
	struct iavf_q_vector *q_vector;
Greg Rose's avatar
Greg Rose committed
1204 1205 1206 1207
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
1208

1209
		q_vector = &adapter->q_vectors[q_idx];
Greg Rose's avatar
Greg Rose committed
1210 1211 1212 1213 1214 1215
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
1216
 * iavf_napi_disable_all - disable NAPI on all queue vectors
Greg Rose's avatar
Greg Rose committed
1217 1218
 * @adapter: board private structure
 **/
1219
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1220 1221
{
	int q_idx;
1222
	struct iavf_q_vector *q_vector;
Greg Rose's avatar
Greg Rose committed
1223 1224 1225
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1226
		q_vector = &adapter->q_vectors[q_idx];
Greg Rose's avatar
Greg Rose committed
1227 1228 1229 1230 1231
		napi_disable(&q_vector->napi);
	}
}

/**
1232
 * iavf_configure - set up transmit and receive data structures
Greg Rose's avatar
Greg Rose committed
1233 1234
 * @adapter: board private structure
 **/
1235
static void iavf_configure(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1236 1237 1238 1239
{
	struct net_device *netdev = adapter->netdev;
	int i;

1240
	iavf_set_rx_mode(netdev);
Greg Rose's avatar
Greg Rose committed
1241

1242 1243 1244
	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
Greg Rose's avatar
Greg Rose committed
1245

1246
	for (i = 0; i < adapter->num_active_queues; i++) {
1247
		struct iavf_ring *ring = &adapter->rx_rings[i];
1248

1249
		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
Greg Rose's avatar
Greg Rose committed
1250 1251 1252 1253
	}
}

/**
1254
 * iavf_up_complete - Finish the last steps of bringing up a connection
Greg Rose's avatar
Greg Rose committed
1255
 * @adapter: board private structure
1256
 *
1257
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
Greg Rose's avatar
Greg Rose committed
1258
 **/
1259
static void iavf_up_complete(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1260
{
1261
	iavf_change_state(adapter, __IAVF_RUNNING);
1262
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
Greg Rose's avatar
Greg Rose committed
1263

1264
	iavf_napi_enable_all(adapter);
Greg Rose's avatar
Greg Rose committed
1265

1266
	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1267
	if (CLIENT_ENABLED(adapter))
1268
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1269
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
Greg Rose's avatar
Greg Rose committed
1270 1271 1272
}

/**
1273 1274
 * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
 * yet and mark other to be removed.
Greg Rose's avatar
Greg Rose committed
1275 1276
 * @adapter: board private structure
 **/
1277
static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1278
{
1279 1280
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
1281

1282
	spin_lock_bh(&adapter->mac_vlan_list_lock);
1283 1284 1285 1286
	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

1287
	/* remove all MAC filters */
1288 1289 1290 1291 1292 1293 1294 1295
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
				 list) {
		if (f->add) {
			list_del(&f->list);
			kfree(f);
		} else {
			f->remove = true;
		}
Greg Rose's avatar
Greg Rose committed
1296
	}
1297

1298
	/* remove all VLAN filters */
1299 1300 1301 1302 1303 1304 1305 1306
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		if (vlf->add) {
			list_del(&vlf->list);
			kfree(vlf);
		} else {
			vlf->remove = true;
		}
1307
	}
1308
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
}

/**
 * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
 * mark other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;
1319

1320 1321
	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
1322 1323 1324 1325 1326 1327 1328 1329 1330
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		if (cf->add) {
			list_del(&cf->list);
			kfree(cf);
			adapter->num_cloud_filters--;
		} else {
			cf->del = true;
		}
1331 1332
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
1333 1334 1335 1336 1337 1338 1339 1340 1341 1342
}

/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir, *fdirtmp;
1343

1344 1345
	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
1346 1347 1348 1349 1350 1351 1352 1353 1354
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
				 list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			list_del(&fdir->list);
			kfree(fdir);
			adapter->fdir_active_fltr--;
		} else {
			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
		}
1355 1356
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
1357 1358 1359 1360 1361 1362 1363 1364 1365 1366
}

/**
 * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
	struct iavf_adv_rss *rss, *rsstmp;
1367

1368 1369
	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
1370 1371 1372 1373 1374 1375 1376 1377 1378
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			list_del(&rss->list);
			kfree(rss);
		} else {
			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
		}
	}
1379
	spin_unlock_bh(&adapter->adv_rss_lock);
1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404
}
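
/* All four iavf_clear_* helpers above follow the same pattern: entries
 * queued for addition but never sent to the PF are freed immediately,
 * while entries the PF already knows about are only marked for removal.
 */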

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
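
/* pci_enable_msix_range() returns the number of vectors actually granted
 * on success, which may be fewer than requested (but never fewer than
 * vector_threshold); num_msix_vectors is sized from that return value.
 */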

/**
1473
 * iavf_free_queues - Free memory for all rings
Greg Rose's avatar
Greg Rose committed
1474 1475 1476 1477
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
1478
static void iavf_free_queues(struct iavf_adapter *adapter)
Greg Rose's avatar
Greg Rose committed
1479 1480 1481
{
	if (!adapter->vsi_res)
		return;
1482
	adapter->num_active_queues = 0;
1483
	kfree(adapter->tx_rings);
1484
	adapter->tx_rings = NULL;
1485
	kfree(adapter->rx_rings);
1486
	adapter->rx_rings = NULL;
Greg Rose's avatar
Greg Rose committed
1487 1488
}

1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568
/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location the ring specific
 * flags.
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}
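
/* Example flag outcome (one possible negotiation, for illustration): with
 * VLAN_V2 granted, outer stripping supported in L2TAG2_2 and outer
 * insertion supported in L2TAG2, each rx_ring gets
 * IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 and each tx_ring gets
 * IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2, so the hot path only tests ring
 * flags instead of re-walking the negotiated capability structures.
 */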

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're reallocating queues during a reset, we don't yet know for
	 * certain that the PF gave us the number of queues we asked for, but
	 * we'll assume it did.  Once the basic reset finishes, we'll confirm
	 * when we start negotiating the config with the PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);
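
	/* Worked example (assuming NONQ_VECS counts the single misc/admin
	 * queue vector this driver uses): with 16 active queue pairs and a
	 * PF advertising max_vectors = 5, v_budget = min(16 + 1, 5) = 5,
	 * leaving 4 vectors to be shared among the queue pairs.
	 */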

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				     adapter->rss_lut, adapter->rss_lut_size);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	return 0;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
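
/* Illustration: with a 64-entry LUT and four active queues the table
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... so receive flows are spread
 * round-robin across all active queues by default.
 */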

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

	return iavf_config_rss(adapter);
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've gotten this far with the ADq flag on, we haven't bailed
	 * out anywhere along the way, and ADq isn't merely enabled: its
	 * resources were actually allocated in the reset path.  Now we can
	 * truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}

/**
 * iavf_process_aq_command - process aq_required flags and send an aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
		return iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				       FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}
	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
		iavf_request_stats(adapter);
		return 0;
	}

	return -EAGAIN;
}
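
/* Usage note: the watchdog calls iavf_process_aq_command() once per pass,
 * and each invocation services at most one aq_required flag, in the
 * priority order of the checks above. If, say, IAVF_FLAG_AQ_ADD_MAC_FILTER
 * and IAVF_FLAG_AQ_ADD_VLAN_FILTER are both set, the MAC filters are sent
 * first and the VLAN filters follow on a later watchdog pass.
 */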

/**
 * iavf_set_vlan_offload_features - set VLAN offload configuration
 * @adapter: board private structure
 * @prev_features: previous features used for comparison
 * @features: updated features used for configuration
 *
 * Set the aq_required bit(s) based on the requested features passed in to
 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
 * the watchdog if any changes are requested to expedite the request via
 * virtchnl.
 **/
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
			       netdev_features_t prev_features,
			       netdev_features_t features)
{
	bool enable_stripping = true, enable_insertion = true;
	u16 vlan_ethertype = 0;
	u64 aq_required = 0;

	/* keep cases separate because one ethertype for offloads can be
	 * disabled at the same time as another is disabled, so check for an
	 * enabled ethertype first, then check for disabled. Default to
	 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
	 * stripping.
	 */
	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else
		vlan_ethertype = ETH_P_8021Q;

	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
		enable_stripping = false;
	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
		enable_insertion = false;

	if (VLAN_ALLOWED(adapter)) {
		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
		 * stripping via virtchnl. VLAN insertion can be toggled on the
		 * netdev, but it doesn't require a virtchnl message
		 */
		if (enable_stripping)
			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;

	} else if (VLAN_V2_ALLOWED(adapter)) {
		switch (vlan_ethertype) {
		case ETH_P_8021Q:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
			break;
		case ETH_P_8021AD:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
			break;
		}
	}

	if (aq_required) {
		adapter->aq_required |= aq_required;
		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
	}
}
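
/* Example: with VLAN_V2 negotiated, clearing NETIF_F_HW_VLAN_CTAG_RX while
 * leaving NETIF_F_HW_VLAN_CTAG_TX set selects ETH_P_8021Q and queues
 * IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING together with
 * IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; the mod_delayed_work() call
 * then lets the watchdog send both virtchnl messages without waiting for
 * its next scheduled pass.
 */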

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure it is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;
	int ret;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	status = iavf_set_mac_type(hw);
	if (status) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
		goto err;
	}

	ret = iavf_check_reset_complete(hw);
	if (ret) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 ret);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	status = iavf_init_adminq(hw);
	if (status) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
			status);
		goto err;
	}
	ret = iavf_send_api_ver(adapter);
	if (ret) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
 * on failure it is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		iavf_change_state(adapter, __IAVF_STARTUP);
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == -EALREADY)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
 * @adapter: board private structure
 */
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
{
	int i, num_req_queues = adapter->num_req_queues;
	struct iavf_vsi *vsi = &adapter->vsi;

	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
		if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
		/* Problem.  The PF gave us fewer queues than what we had
		 * negotiated in our request.  Need a reset to see if we can
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);

		return -EAGAIN;
	}
	adapter->num_req_queues = 0;
	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = adapter->vf_res->rss_key_size;
		adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
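
/* Failure-path example: if the VF had requested 8 queue pairs (typically
 * via VIRTCHNL_OP_REQUEST_QUEUES) but vsi_res->num_queue_pairs comes back
 * as 4, the function sets IAVF_FLAG_REINIT_MSIX_NEEDED, schedules a reset
 * so the driver can rebuild around 4 queues, and returns -EAGAIN.
 */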

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes the driver initialization procedure.
 * On success the state is changed to __IAVF_DOWN;
 * on failure it is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == -EALREADY) {
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == -EINVAL) {
		/* We only get -EINVAL if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_parse_vf_resource_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
			err);
		goto err_alloc;
	}
	/* Some features require additional messages to negotiate extended
	 * capabilities. These are processed in sequence by the
	 * __IAVF_INIT_EXTENDED_CAPS driver state.
	 */
	adapter->extended_caps = IAVF_EXTENDED_CAPS;

	iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
	return;

err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
 * @adapter: board private structure
 *
 * Function processes send of the extended VLAN V2 capability message to the
 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
 */
static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
	int ret;

	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));

	ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (ret == -EOPNOTSUPP) {
		/* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
		 * we did not send the capability exchange message and do not
		 * expect a response.
		 */
		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
	}

	/* We sent the message, so move on to the next step */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
}

/**
 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
 * @adapter: board private structure
 *
 * Function processes receipt of the extended VLAN V2 capability message from
 * the PF.
 **/
static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
	int ret;

	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));

	memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));

	ret = iavf_get_vf_vlan_v2_caps(adapter);
	if (ret)
		goto err;

	/* We've processed receipt of the VLAN V2 caps message */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
	return;
err:
	/* We didn't receive a reply. Make sure we try sending again when
	 * __IAVF_INIT_FAILED attempts to recover.
	 */
	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_process_extended_caps - Part of driver startup
 * @adapter: board private structure
 *
 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
 * handles negotiating capabilities for features which require an additional
 * message.
 *
 * Once all extended capabilities exchanges are finished, the driver will
 * transition into __IAVF_INIT_CONFIG_ADAPTER.
 */
static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
{
	WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);

	/* Process capability exchange for VLAN V2 */
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
		iavf_init_send_offload_vlan_v2_caps(adapter);
		return;
	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
		iavf_init_recv_offload_vlan_v2_caps(adapter);
		return;
	}

	/* When we reach here, no further extended capabilities exchanges are
	 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
	 */
	iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
}
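
/* The SEND/RECV flag pairs form a small sub-state machine: a capability
 * starts with both of its bits set in extended_caps, the send step clears
 * SEND (and clears RECV as well if the PF never offered the capability),
 * and the receive step clears RECV once the reply is parsed. Only when all
 * bits are clear does init move on to __IAVF_INIT_CONFIG_ADAPTER.
 */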

/**
 * iavf_init_config_adapter - last part of driver startup
 * @adapter: board private structure
 *
 * After all the supported capabilities are negotiated, then the
 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
 */
static void iavf_init_config_adapter(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);

	if (iavf_process_config(adapter))
		goto err;

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	iavf_change_state(adapter, __IAVF_DOWN);
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	if (VLAN_V2_ALLOWED(adapter))
		/* request initial VLAN offload settings */
		iavf_set_vlan_offload_features(adapter, 0, netdev->features);

	return;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task.work);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (!mutex_trylock(&adapter->crit_lock)) {
		if (adapter->state == __IAVF_REMOVE)
			return;

		goto restart_watchdog;
	}

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		iavf_change_state(adapter, __IAVF_COMM_FAILED);

	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		mutex_unlock(&adapter->crit_lock);
		queue_work(iavf_wq, &adapter->reset_task);
		return;
	}

	switch (adapter->state) {
	case __IAVF_STARTUP:
		iavf_startup(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(30));
		return;
	case __IAVF_INIT_VERSION_CHECK:
		iavf_init_version_check(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(30));
		return;
	case __IAVF_INIT_GET_RESOURCES:
		iavf_init_get_resources(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(1));
		return;
	case __IAVF_INIT_EXTENDED_CAPS:
		iavf_init_process_extended_caps(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(1));
		return;
	case __IAVF_INIT_CONFIG_ADAPTER:
		iavf_init_config_adapter(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(1));
		return;
	case __IAVF_INIT_FAILED:
		if (test_bit(__IAVF_IN_REMOVE_TASK,
			     &adapter->crit_section)) {
			/* Do not update the state and do not reschedule
			 * watchdog task, iavf_remove should handle this state
			 * as it can loop forever
			 */
			mutex_unlock(&adapter->crit_lock);
			return;
		}
		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
			dev_err(&adapter->pdev->dev,
				"Failed to communicate with PF; waiting before retry\n");
			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
			iavf_shutdown_adminq(hw);
			mutex_unlock(&adapter->crit_lock);
			queue_delayed_work(iavf_wq,
					   &adapter->watchdog_task, (5 * HZ));
			return;
		}
		/* Try again from failed step */
		iavf_change_state(adapter, adapter->last_state);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
		return;
	case __IAVF_COMM_FAILED:
		if (test_bit(__IAVF_IN_REMOVE_TASK,
			     &adapter->crit_section)) {
			/* Set state to __IAVF_INIT_FAILED and perform remove
			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
			 * doesn't bring the state back to __IAVF_COMM_FAILED.
			 */
			iavf_change_state(adapter, __IAVF_INIT_FAILED);
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			mutex_unlock(&adapter->crit_lock);
			return;
		}
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			/* When init task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			iavf_change_state(adapter, __IAVF_STARTUP);
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task,
				   msecs_to_jiffies(10));
		return;
	case __IAVF_RESETTING:
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
		return;
	case __IAVF_DOWN:
	case __IAVF_DOWN_PENDING:
	case __IAVF_TESTING:
	case __IAVF_RUNNING:
		if (adapter->current_op) {
			if (!iavf_asq_done(hw)) {
				dev_dbg(&adapter->pdev->dev,
					"Admin queue timeout\n");
				iavf_send_api_ver(adapter);
			}
		} else {
2822 2823
			int ret = iavf_process_aq_command(adapter);

			/* An error will be returned if no commands were
			 * processed; use this opportunity to update stats
			 * if the error isn't -EOPNOTSUPP
			 */
			if (ret && ret != -EOPNOTSUPP &&
			    adapter->state == __IAVF_RUNNING)
				iavf_request_stats(adapter);
		}
		if (adapter->state == __IAVF_RUNNING)
			iavf_detect_recover_hung(&adapter->vsi);
		break;
	case __IAVF_REMOVE:
	default:
		mutex_unlock(&adapter->crit_lock);
		return;
	}

	/* check for hw reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!reg_val) {
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		queue_work(iavf_wq, &adapter->reset_task);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task, HZ * 2);
		return;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
	mutex_unlock(&adapter->crit_lock);
restart_watchdog:
	if (adapter->state >= __IAVF_DOWN)
		queue_work(iavf_wq, &adapter->adminq_task);
	if (adapter->aq_required)
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(20));
	else
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
}
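
/* Scheduling summary, as implemented above: the init states re-queue the
 * watchdog at 1-30 ms granularity, __IAVF_INIT_FAILED retries after 1 s
 * (backing off to 5 s once IAVF_AQ_MAX_ERR failures accumulate),
 * __IAVF_COMM_FAILED polls every 10 ms, __IAVF_RESETTING and detected
 * hardware resets back off to 2 s, and the restart_watchdog path uses
 * 20 ms when aq_required work is pending or 2 s when idle.
 */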

/**
 * iavf_disable_vf - disable VF
 * @adapter: board private structure
 *
 * Set communication failed flag and free all resources.
 * NOTE: This function is expected to be called with crit_lock being held.
 **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);
	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	iavf_change_state(adapter, __IAVF_DOWN);
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

Greg Rose's avatar
Greg Rose committed
2934
/**
2935
 * iavf_reset_task - Call-back task to handle hardware reset
Greg Rose's avatar
Greg Rose committed
2936 2937 2938 2939 2940 2941
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
2942
static void iavf_reset_task(struct work_struct *work)
Greg Rose's avatar
Greg Rose committed
2943
{
2944 2945
	struct iavf_adapter *adapter = container_of(work,
						      struct iavf_adapter,
2946
						      reset_task);
2947
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
Mitch Williams's avatar
Mitch Williams committed
2948
	struct net_device *netdev = adapter->netdev;
2949
	struct iavf_hw *hw = &adapter->hw;
2950
	struct iavf_mac_filter *f, *ftmp;
2951
	struct iavf_cloud_filter *cf;
2952
	enum iavf_status status;
2953
	u32 reg_val;
Mitch Williams's avatar
Mitch Williams committed
2954
	int i = 0, err;
2955
	bool running;
Greg Rose's avatar
Greg Rose committed
2956

2957 2958 2959
	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
2960 2961 2962
	if (!mutex_trylock(&adapter->crit_lock)) {
		if (adapter->state != __IAVF_REMOVE)
			queue_work(iavf_wq, &adapter->reset_task);
2963

2964 2965
		return;
	}
2966

2967
	while (!mutex_trylock(&adapter->client_lock))
2968
		usleep_range(500, 1000);
2969
	if (CLIENT_ENABLED(adapter)) {
2970 2971 2972 2973
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2974
		cancel_delayed_work_sync(&adapter->client_task);
2975
		iavf_notify_client_close(&adapter->vsi, true);
2976
	}
2977 2978 2979
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2980 2981 2982
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
2983 2984 2985
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
2986
	}
2987
	adapter->flags |= IAVF_FLAG_RESET_PENDING;
2988

2989
	/* poll until we see the reset actually happen */
2990
	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2991 2992
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2993
		if (!reg_val)
2994
			break;
2995
		usleep_range(5000, 10000);
2996
	}
2997
	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2998
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
2999 3000
		goto continue_reset; /* act like the reset happened */
	}
Greg Rose's avatar
Greg Rose committed
3001

3002
	/* wait until the reset is complete and the PF is responding to us */
3003
	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3004
		/* sleep first to make sure a minimum wait time is met */
3005
		msleep(IAVF_RESET_WAIT_MS);
3006

3007 3008
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3009
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
Greg Rose's avatar
Greg Rose committed
3010 3011
			break;
	}
3012

3013
	pci_set_master(adapter->pdev);
3014
	pci_restore_msi_state(adapter->pdev);
3015

3016
	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3017
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3018
			reg_val);
3019
		iavf_disable_vf(adapter);
3020
		mutex_unlock(&adapter->client_lock);
3021
		mutex_unlock(&adapter->crit_lock);
3022
		return; /* Do not attempt to reinit. It's dead, Jim. */
Greg Rose's avatar
Greg Rose committed
3023
	}
3024 3025

continue_reset:
3026 3027 3028 3029
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
3030
	running = adapter->state == __IAVF_RUNNING;
3031 3032

	if (running) {
Mitch Williams's avatar
Mitch Williams committed
3033
		netif_carrier_off(netdev);
3034
		netif_tx_stop_all_queues(netdev);
3035
		adapter->link_up = false;
3036
		iavf_napi_disable_all(adapter);
Mitch Williams's avatar
Mitch Williams committed
3037
	}
3038
	iavf_irq_disable(adapter);
Mitch Williams's avatar
Mitch Williams committed
3039

3040
	iavf_change_state(adapter, __IAVF_RESETTING);
3041
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3042 3043 3044 3045

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
3046 3047
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);
Greg Rose's avatar
Greg Rose committed
3048

3049
	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
Greg Rose's avatar
Greg Rose committed
3050
	/* kill and reinit the admin queue */
3051
	iavf_shutdown_adminq(hw);
3052
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3053 3054
	status = iavf_init_adminq(hw);
	if (status) {
Mitch Williams's avatar
Mitch Williams committed
3055
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3056 3057 3058
			 status);
		goto reset_err;
	}
3059 3060
	adapter->aq_required = 0;

3061 3062
	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3063
		err = iavf_reinit_interrupt_scheme(adapter);
3064 3065 3066
		if (err)
			goto reset_err;
	}
Greg Rose's avatar
Greg Rose committed
3067

3068 3069 3070 3071 3072 3073 3074 3075
	if (RSS_AQ(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	} else {
		err = iavf_init_rss(adapter);
		if (err)
			goto reset_err;
	}

3076
	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3077 3078 3079 3080 3081 3082 3083
	/* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
	 * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
	 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
	 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
	 * been successfully sent and negotiated
	 */
	adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3084
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
Mitch Williams's avatar
Mitch Williams committed
3085

3086 3087
	spin_lock_bh(&adapter->mac_vlan_list_lock);

3088 3089 3090 3091 3092 3093 3094 3095 3096 3097
	/* Delete filter for the current MAC address, it could have
	 * been changed by the PF via administratively set MAC.
	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
			list_del(&f->list);
			kfree(f);
		}
	}
Mitch Williams's avatar
Mitch Williams committed
3098 3099 3100 3101
	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
3102 3103
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

3104 3105 3106 3107 3108 3109 3110 3111 3112 3113
	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

3114 3115 3116
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);
Greg Rose's avatar
Greg Rose committed
3117

3118 3119 3120
	bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);

3121
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
Greg Rose's avatar
Greg Rose committed
3122

3123 3124 3125 3126
	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
Greg Rose's avatar
Greg Rose committed
3127
		/* allocate transmit descriptors */
3128
		err = iavf_setup_all_tx_resources(adapter);
Greg Rose's avatar
Greg Rose committed
3129 3130 3131 3132
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
		    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
		}

		iavf_configure(adapter);

		/* iavf_up_complete() will switch device back
		 * to __IAVF_RUNNING
		 */
		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		iavf_change_state(adapter, __IAVF_DOWN);
		wake_up(&adapter->down_waitqueue);
	}

	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	mutex_unlock(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);

	return;
reset_err:
	if (running) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		iavf_free_traffic_irqs(adapter);
	}
	iavf_disable_vf(adapter);

	mutex_unlock(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}

/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	enum iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	if (!mutex_trylock(&adapter->crit_lock)) {
		if (adapter->state == __IAVF_REMOVE)
			return;

		queue_work(iavf_wq, &adapter->adminq_task);
		goto out;
	}

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
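		/* each ARQ element carries one PF->VF message: the virtchnl
		 * opcode arrives in cookie_high, the PF's return status in
		 * cookie_low, and "pending" counts the elements still queued
		 */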
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);
	mutex_unlock(&adapter->crit_lock);

	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
		if (adapter->netdev_registered ||
		    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
			struct net_device *netdev = adapter->netdev;

			rtnl_lock();
			netdev_update_features(netdev);
			rtnl_unlock();
			/* Request VLAN offload settings */
			if (VLAN_V2_ALLOWED(adapter))
				iavf_set_vlan_offload_features
					(adapter, 0, netdev->features);

			iavf_set_queue_vlan_tag_loc(adapter);
		}

		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
	}
	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
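	/* The ARQ/ASQ length registers read below also latch sticky error
	 * flags (VFE, OVFL, CRIT). Each flag is logged once and then cleared
	 * by writing the masked value back, so a persistent fault is not
	 * re-reported on every pass of this task.
	 */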
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}

/**
 * iavf_client_task - worker thread to perform client work
 * @work: pointer to work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/
static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client lock, just give up. We'll be rescheduled
	 * later.
	 */

	if (!mutex_trylock(&adapter->client_lock))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	mutex_unlock(&adapter->client_lock);
}

/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}

/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}

/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bw for a tc
 **/
static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	if (ADV_LINK_SUPPORT(adapter)) {
		if (adapter->link_speed_mbps < U32_MAX) {
			speed = adapter->link_speed_mbps;
			goto validate_bw;
		} else {
			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
			return -EINVAL;
		}
	}

	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		speed = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		speed = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		speed = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		speed = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		speed = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		speed = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		speed = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		speed = SPEED_100;
		break;
	default:
		break;
	}

validate_bw:
	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}
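/* Illustrative example: with a 10 Gbps link negotiated, the switch above
 * resolves speed to SPEED_10000 (10000, in Mbps units), so a requested
 * aggregate max_tx_rate of 12000 Mbps fails the check with -EINVAL while
 * 8000 Mbps is accepted.
 */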

/**
 * iavf_validate_ch_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates the config provided by the user to
 * configure queue channels. Returns 0 on a valid config.
 **/
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	u32 tx_rate_rem = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified for TC%d\n",
				i);
			return -EINVAL;
		}

		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
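		/* mqprio rates are passed in bytes per second; assuming
		 * IAVF_MBPS_DIVISOR is 125000 (the number of bytes/s in one
		 * Mbit/s), a requested max_rate of 125000000 B/s converts to
		 * 1000 Mbps here (the divisor value is an assumption for
		 * illustration; see the macro definition)
		 */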

		if (mqprio_qopt->max_rate[i] &&
		    tx_rate < IAVF_MBPS_QUANTA) {
			dev_err(&adapter->pdev->dev,
				"Invalid max tx rate for TC%d, minimum %dMbps\n",
				i, IAVF_MBPS_QUANTA);
			return -EINVAL;
		}

		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);

		if (tx_rate_rem != 0) {
			dev_err(&adapter->pdev->dev,
				"Invalid max tx rate for TC%d, not divisible by %d\n",
				i, IAVF_MBPS_QUANTA);
			return -EINVAL;
		}

		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > adapter->num_active_queues) {
		dev_err(&adapter->pdev->dev,
			"Cannot support requested number of queues\n");
		return -EINVAL;
	}

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}

/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			total_qps = adapter->orig_num_active_queues;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}

		/* Take a snapshot of the original config, such as
		 * "num_active_queues". It is used later, when the delete ADQ
		 * flow is exercised, so that once the delete ADQ flow
		 * completes the VF goes back to its original queue
		 * configuration
		 */

		adapter->orig_num_active_queues = adapter->num_active_queues;

		/* Store queue info based on TC so that the VF gets configured
		 * with the correct number of queues when it completes the ADQ
		 * config flow
		 */
		adapter->ch_config.total_qps = total_qps;

		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return 0;

	netif_set_real_num_rx_queues(netdev, total_qps);
	netif_set_real_num_tx_queues(netdev, total_qps);

	return ret;
}

/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure
 */
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *f,
				 struct iavf_cloud_filter *filter)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}
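	/* the bitmask above whitelists the only match keys this driver can
	 * offload; a rule that matches on anything else (e.g. ip_tos) is
	 * rejected up front rather than silently ignored
	 */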

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		if (match.mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (match.key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xff or all 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return -EINVAL;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return -EINVAL;
			}
		}

		if (!is_zero_ether_addr(match.key->dst))
			if (is_valid_ether_addr(match.key->dst) ||
			    is_multicast_ether_addr(match.key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						match.key->dst);
			}

		if (!is_zero_ether_addr(match.key->src))
			if (is_valid_ether_addr(match.key->src) ||
			    is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						match.key->src);
			}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					match.mask->vlan_id);
				return -EINVAL;
			}
		}
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(match.mask->dst));
				return -EINVAL;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
				return -EINVAL;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return -EINVAL;
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
		}
		if (match.key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = match.key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&match.mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return -EINVAL;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return -EINVAL;
		}
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(match.mask->src));
				return -EINVAL;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(match.mask->dst));
				return -EINVAL;
			}
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = match.key->dst;
		}

		if (match.key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = match.key->src;
		}
	}
	vf->field_flags = field_flags;

	return 0;
}

/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */
static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
			      struct iavf_cloud_filter *filter)
{
	if (tc == 0)
		return 0;
	if (tc < adapter->num_tc) {
		if (!filter->f.data.tcp_spec.dst_port) {
			dev_err(&adapter->pdev->dev,
				"Specify destination port to redirect to traffic class other than TC0\n");
			return -EINVAL;
		}
	}
	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}
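/* Illustrative example (hypothetical command line): a rule such as
 * "tc filter add dev <vf> ... flower ip_proto tcp dst_port 80 hw_tc 2"
 * reaches this function with tc == 2 and is packaged as a
 * VIRTCHNL_ACTION_TC_REDIRECT filter with action_meta = 2; the same rule
 * without a destination port match is rejected with -EINVAL above.
 */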

/**
 * iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}

/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	while (!mutex_trylock(&adapter->crit_lock)) {
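		/* bounded busy-wait: give up after ~50 attempts (about 50
		 * usecs) so that a crit_lock held across a long operation,
		 * such as a reset, cannot stall the tc control path forever
		 */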
		if (--count == 0) {
			kfree(filter);
			return err;
		}
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* bail out here if filter already exists */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
		dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
		err = -EEXIST;
		goto spin_unlock;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
spin_unlock:
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);

	mutex_unlock(&adapter->crit_lock);
	return err;
}

/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}

/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to flow_cls_offload struct with flow info
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case FLOW_CLS_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: board private structure registered with the block callback
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct iavf_adapter *adapter = cb_priv;

	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(iavf_block_cb_list);

/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &iavf_block_cb_list,
						  iavf_setup_tc_block_cb,
						  adapter, adapter, true);
	default:
		return -EOPNOTSUPP;
	}
}
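/* Illustrative example (hypothetical command line): "tc qdisc add dev <vf>
 * root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 queues 2@0 2@2 hw 1 mode channel"
 * arrives here as TC_SETUP_QDISC_MQPRIO and is handled by __iavf_setup_tc(),
 * while flower filters attach through the TC_SETUP_BLOCK path above.
 */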

/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog is started,
 * and the stack is notified that the interface is ready.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	while (!mutex_trylock(&adapter->crit_lock)) {
		/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
		 * is already taken and iavf_open is called from an upper
		 * device's notifier reacting on NETDEV_REGISTER event.
		 * We have to leave here to avoid dead lock.
		 */
		if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
			return -EBUSY;

		usleep_range(500, 1000);
	}

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	if (adapter->state == __IAVF_RUNNING &&
	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
		err = 0;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Restore VLAN filters that were removed with IFF_DOWN */
	iavf_restore_filters(adapter);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	mutex_unlock(&adapter->crit_lock);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	mutex_unlock(&adapter->crit_lock);

	return err;
}

/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u64 aq_to_restore;
	int status;

	mutex_lock(&adapter->crit_lock);

	if (adapter->state <= __IAVF_DOWN_PENDING) {
		mutex_unlock(&adapter->crit_lock);
		return 0;
	}

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
	 * deadlock with adminq_task() until iavf_close timeouts. We must send
	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
	 * disable queues possible for vf. Give only necessary flags to
	 * iavf_down and save other to set them right before iavf_close()
	 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
	 * iavf will be in DOWN state.
	 */
	aq_to_restore = adapter->aq_required;
	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;

	/* Remove flags which we do not want to send after close or we want to
	 * send before disable queues.
	 */
	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG		|
			   IAVF_FLAG_AQ_ENABLE_QUEUES		|
			   IAVF_FLAG_AQ_CONFIGURE_QUEUES	|
			   IAVF_FLAG_AQ_ADD_VLAN_FILTER		|
			   IAVF_FLAG_AQ_ADD_MAC_FILTER		|
			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER	|
			   IAVF_FLAG_AQ_ADD_FDIR_FILTER		|
			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	iavf_down(adapter);
	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
	iavf_free_traffic_irqs(adapter);

	mutex_unlock(&adapter->crit_lock);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */

	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");

	mutex_lock(&adapter->crit_lock);
	adapter->aq_required |= aq_to_restore;
	mutex_unlock(&adapter->crit_lock);
	return 0;
}

/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}

	if (netif_running(netdev)) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}

	return 0;
}

#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)

/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* trigger update on any VLAN feature change */
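	/* the XOR isolates feature bits that differ between the current and
	 * requested sets; any change among the four VLAN offload bits is
	 * enough to renegotiate VLAN offload settings with the PF
	 */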
	if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
	    (features & NETIF_VLAN_OFFLOAD_FEATURES))
		iavf_set_vlan_offload_features(adapter, netdev->features,
					       features);

	return 0;
}

/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
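	/* the header length fields are counted in 2-byte words (MACLEN) or
	 * 4-byte dwords (IPLEN/EIPLEN/L4TUNLEN), so each mask below rejects
	 * a header that is either too long or not word-aligned; e.g.
	 * ~(63 * 2) fails any L2 header over 126 bytes or of odd length
	 */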
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

/**
 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that can be toggled on and off.
 **/
static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
{
	netdev_features_t hw_features = 0;

	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return hw_features;

	/* Enable VLAN features if supported */
	if (VLAN_ALLOWED(adapter)) {
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;

		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED &&
			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner &&
			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		}
	}

	return hw_features;
}

/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that are enabled by default.
 **/
static netdev_features_t
iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
{
	netdev_features_t features = 0;

	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return features;

	if (VLAN_ALLOWED(adapter)) {
		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vlan_v2_caps->filtering.filtering_support;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;
		u32 ethertype_init;

		/* give priority to outer stripping and don't support both outer
		 * and inner stripping
		 */
		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
			else if (stripping_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		/* give priority to outer insertion and don't support both outer
		 * and inner insertion
		 */
		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
			else if (insertion_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
		}

		/* give priority to outer filtering and don't bother if both
		 * outer and inner filtering are enabled
		 */
		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		} else if (filtering_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		}
	}

	return features;
}

#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
	(!(((requested) & (feature_bit)) && \
	   !((allowed) & (feature_bit))))
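/* The macro reads as "requested implies allowed": it evaluates false only
 * when a feature bit is present in the requested set but absent from the
 * allowed set, e.g. requesting NETIF_F_HW_VLAN_STAG_RX when only CTAG
 * offloads were negotiated clears that bit below.
 */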

/**
 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
 * @adapter: board private structure
 * @requested_features: stack requested NETDEV features
 **/
static netdev_features_t
iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
			      netdev_features_t requested_features)
{
	netdev_features_t allowed_features;

	allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
		iavf_get_netdev_vlan_features(adapter);

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_TX))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_RX))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_TX))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_RX))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_FILTER))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_FILTER))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;

	if ((requested_features &
	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
	    (requested_features &
	     (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
	    adapter->vlan_v2_caps.offloads.ethertype_match ==
	    VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
		netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
		requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
					NETIF_F_HW_VLAN_STAG_TX);
	}

	return requested_features;
}

/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return iavf_fix_netdev_vlan_features(adapter, features);
}

static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
Greg Rose's avatar
Greg Rose committed
4681 4682 4683
};

/**
4684
 * iavf_check_reset_complete - check that VF reset is complete
Greg Rose's avatar
Greg Rose committed
4685 4686 4687 4688
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
4689
static int iavf_check_reset_complete(struct iavf_hw *hw)
Greg Rose's avatar
Greg Rose committed
4690 4691 4692 4693
{
	u32 rstat;
	int i;

4694
	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4695 4696
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			     IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4697 4698
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
Greg Rose's avatar
Greg Rose committed
4699
			return 0;
4700
		usleep_range(10, 20);
Greg Rose's avatar
Greg Rose committed
4701 4702 4703 4704
	}
	return -EBUSY;
}

/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
4714
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
4715
	netdev_features_t hw_vlan_features, vlan_features;
Mitch Williams's avatar
Mitch Williams committed
4716
	struct net_device *netdev = adapter->netdev;
4717 4718
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;
Mitch Williams's avatar
Mitch Williams committed
4719

4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735
	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES	|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* advertise to the stack only if offloads for encapsulated packets
	 * are supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* get HW VLAN features that can be toggled */
	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);

	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
		hw_features |= NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= hw_features | hw_vlan_features;
	vlan_features = iavf_get_netdev_vlan_features(adapter);

	netdev->features |= hw_features | vlan_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs an MTU of at least 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	return 0;
}
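
/* For reference: bits in netdev->hw_features can be toggled from user
 * space (ethtool -K), while netdev->features is the currently active
 * set. A bit present only in netdev->features, such as
 * NETIF_F_HW_VLAN_CTAG_FILTER above when only VIRTCHNL_VF_OFFLOAD_VLAN
 * is negotiated, is always on and cannot be cleared by the user.
 */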

/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
	/* Prevent the watchdog from running. */
	iavf_change_state(adapter, __IAVF_REMOVE);
	adapter->aq_required = 0;
	mutex_unlock(&adapter->crit_lock);

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}
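
/* Note: iavf_shutdown() leaves the adapter in __IAVF_REMOVE, which is
 * what allows iavf_remove() to return early when a reboot or shutdown
 * is already in progress.
 */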

/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	iavf_change_state(adapter, __IAVF_STARTUP);

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&adapter->crit_lock);
	mutex_init(&adapter->client_lock);
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
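	/* Start the watchdog with a small per-function delay (5 ms times
	 * the PCI function number), presumably to stagger the bring-up of
	 * multiple VFs so they do not all hit the PF mailbox at once.
	 */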
	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	/* Setup the wait queue for indicating virtchannel events */
	init_waitqueue_head(&adapter->vc_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
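
/* Note that probe does not register the netdev: it only queues the
 * watchdog, whose state machine (starting from __IAVF_STARTUP)
 * negotiates capabilities with the PF over virtchnl and registers the
 * netdev once the VF's resources are known.
 */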

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	mutex_unlock(&adapter->crit_lock);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter;
	int err;

	adapter = iavf_pdev_to_adapter(pdev);

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(adapter->netdev);

	return err;
}
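
/* The reset task queued above rebuilds the queue and virtchnl state
 * that was dropped across suspend, so resume itself only restores bus
 * mastering and the misc interrupt before re-attaching the netdev.
 */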

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	struct net_device *netdev = adapter->netdev;
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* When a reboot/shutdown is in progress there is no need to do
	 * anything, as the adapter is already in the __IAVF_REMOVE state
	 * set by the iavf_shutdown() callback.
	 */
	if (adapter->state == __IAVF_REMOVE)
		return;

	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
	while (1) {
		mutex_lock(&adapter->crit_lock);
		if (adapter->state == __IAVF_RUNNING ||
		    adapter->state == __IAVF_DOWN ||
		    adapter->state == __IAVF_INIT_FAILED) {
			mutex_unlock(&adapter->crit_lock);
			break;
		}

		mutex_unlock(&adapter->crit_lock);
		usleep_range(500, 1000);
	}
	cancel_delayed_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		rtnl_lock();
		unregister_netdevice(netdev);
		adapter->netdev_registered = false;
		rtnl_unlock();
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	mutex_lock(&adapter->crit_lock);
	dev_info(&adapter->pdev->dev, "Remove device\n");
	iavf_change_state(adapter, __IAVF_REMOVE);

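	/* Ask the PF to reset this VF so the hardware quiesces before the
	 * software state is torn down; the msleep(50) calls below give
	 * the asynchronous admin-queue request time to be processed.
	 */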
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}

	iavf_misc_irq_disable(adapter);
	/* Shut down all the garbage mashers on the detention level */
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->adminq_task);
	cancel_delayed_work_sync(&adapter->client_task);

	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_misc_irq(adapter);

	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	mutex_destroy(&adapter->crit_lock);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

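/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that routes the
 * system-sleep suspend/resume callbacks to iavf_suspend/iavf_resume.
 */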
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	return pci_register_driver(&iavf_driver);
}
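
/* iavf_wq is shared by all adapters. WQ_MEM_RECLAIM gives the queue a
 * rescuer thread so queued work can still make progress under memory
 * pressure, which matters because a network device can sit in the
 * memory-reclaim path.
 */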

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */