Lines matching +full:speed +full:-bins — a search-result excerpt from the Linux kernel's QLogic qed SR-IOV code (drivers/net/ethernet/qlogic/qed/qed_sriov.c); each entry shows the source line number, the matched code, and its enclosing function.

1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
27 return le16_to_cpu(entity_id) - MAX_NUM_PFS; in qed_vf_from_entity_id()
34 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_vf_calculate_legacy()
38 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_vf_calculate_legacy()
51 int rc = -EINVAL; in qed_sp_vf_start()
57 init_data.opaque_fid = p_vf->opaque_fid; in qed_sp_vf_start()
66 p_ramrod = &p_ent->ramrod.vf_start; in qed_sp_vf_start()
68 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); in qed_sp_vf_start()
69 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); in qed_sp_vf_start()
71 switch (p_hwfn->hw_info.personality) { in qed_sp_vf_start()
73 p_ramrod->personality = PERSONALITY_ETH; in qed_sp_vf_start()
77 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; in qed_sp_vf_start()
81 p_hwfn->hw_info.personality); in qed_sp_vf_start()
83 return -EINVAL; in qed_sp_vf_start()
86 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; in qed_sp_vf_start()
91 …"VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs… in qed_sp_vf_start()
92 p_vf->abs_vf_id, in qed_sp_vf_start()
98 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; in qed_sp_vf_start()
99 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; in qed_sp_vf_start()
102 "VF[%d] - Starting using HSI %02x.%02x\n", in qed_sp_vf_start()
103 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); in qed_sp_vf_start()
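The fragment above shows the PF's fast-path HSI version negotiation: a VF may request a newer minor version, and the PF tolerates it but configures its own ETH_HSI_VER_MINOR instead. A minimal standalone sketch of that clamp, with placeholder version constants (not the driver's real values):

    #include <stdio.h>

    #define ETH_HSI_VER_MAJOR 3   /* placeholder values for illustration */
    #define ETH_HSI_VER_MINOR 10

    /* Return the minor version the PF will actually configure for the VF. */
    static unsigned char negotiate_fp_minor(unsigned char vf_minor)
    {
        if (vf_minor > ETH_HSI_VER_MINOR) {
            printf("VF requested %02x.%02x, newer than PF's %02x.%02x; using PF's\n",
                   ETH_HSI_VER_MAJOR, vf_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
            return ETH_HSI_VER_MINOR;
        }
        return vf_minor;
    }

    int main(void)
    {
        printf("configured minor = %02x\n", negotiate_fp_minor(0x0c));
        return 0;
    }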
114 int rc = -EINVAL; in qed_sp_vf_stop()
128 p_ramrod = &p_ent->ramrod.vf_stop; in qed_sp_vf_stop()
130 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); in qed_sp_vf_stop()
139 if (!p_hwfn->pf_iov_info) { in qed_iov_is_valid_vfid()
140 DP_NOTICE(p_hwfn->cdev, "No iov info\n"); in qed_iov_is_valid_vfid()
144 if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || in qed_iov_is_valid_vfid()
148 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && in qed_iov_is_valid_vfid()
152 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && in qed_iov_is_valid_vfid()
165 if (!p_hwfn->pf_iov_info) { in qed_iov_get_vf_info()
166 DP_NOTICE(p_hwfn->cdev, "No iov info\n"); in qed_iov_get_vf_info()
172 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; in qed_iov_get_vf_info()
186 if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) in qed_iov_get_vf_rx_queue_cid()
187 return p_queue->cids[i].p_cid; in qed_iov_get_vf_rx_queue_cid()
213 p_qcid = &p_vf->vf_queues[qid].cids[i]; in qed_iov_validate_queue_mode()
215 if (!p_qcid->p_cid) in qed_iov_validate_queue_mode()
218 if (p_qcid->b_is_tx != b_is_tx) in qed_iov_validate_queue_mode()
233 if (rx_qid >= p_vf->num_rxqs) { in qed_iov_validate_rxq()
236 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", in qed_iov_validate_rxq()
237 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); in qed_iov_validate_rxq()
249 if (tx_qid >= p_vf->num_txqs) { in qed_iov_validate_txq()
252 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", in qed_iov_validate_txq()
253 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); in qed_iov_validate_txq()
265 for (i = 0; i < p_vf->num_sbs; i++) in qed_iov_validate_sb()
266 if (p_vf->igu_sbs[i] == sb_idx) in qed_iov_validate_sb()
271 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", in qed_iov_validate_sb()
272 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); in qed_iov_validate_sb()
282 for (i = 0; i < p_vf->num_rxqs; i++) in qed_iov_validate_active_rxq()
296 for (i = 0; i < p_vf->num_txqs; i++) in qed_iov_validate_active_txq()
309 int crc_size = sizeof(p_bulletin->crc); in qed_iov_post_vf_bulletin()
315 return -EINVAL; in qed_iov_post_vf_bulletin()
317 if (!p_vf->vf_bulletin) in qed_iov_post_vf_bulletin()
318 return -EINVAL; in qed_iov_post_vf_bulletin()
320 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_post_vf_bulletin()
323 p_bulletin->version++; in qed_iov_post_vf_bulletin()
324 p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, in qed_iov_post_vf_bulletin()
325 p_vf->bulletin.size - crc_size); in qed_iov_post_vf_bulletin()
329 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); in qed_iov_post_vf_bulletin()
334 params.dst_vfid = p_vf->abs_vf_id; in qed_iov_post_vf_bulletin()
335 return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, in qed_iov_post_vf_bulletin()
336 p_vf->vf_bulletin, p_vf->bulletin.size / 4, in qed_iov_post_vf_bulletin()
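qed_iov_post_vf_bulletin() bumps the bulletin version, computes a CRC32 over the structure while skipping the leading crc field itself, and only then DMAs the buffer into the VF's memory, letting the VF validate each snapshot it reads. A hedged userspace model of the producer side, with zlib's crc32() standing in for the kernel helper (compile with -lz):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* Toy bulletin: crc must stay the first field so it can be skipped. */
    struct bulletin {
        uint32_t crc;
        uint32_t version;
        uint8_t  payload[56];
    };

    static void post_bulletin(struct bulletin *b)
    {
        size_t crc_size = sizeof(b->crc);

        b->version++;
        b->crc = crc32(0, (const unsigned char *)b + crc_size,
                       sizeof(*b) - crc_size);
        /* The driver would now DMA *b into the VF-visible buffer. */
    }

    int main(void)
    {
        struct bulletin b = { 0 };

        memcpy(b.payload, "link-up", 8);
        post_bulletin(&b);
        printf("version %u crc 0x%08x\n", (unsigned)b.version, (unsigned)b.crc);
        return 0;
    }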
342 struct qed_hw_sriov_info *iov = cdev->p_iov_info; in qed_iov_pci_cfg_info()
343 int pos = iov->pos; in qed_iov_pci_cfg_info()
346 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); in qed_iov_pci_cfg_info()
348 pci_read_config_word(cdev->pdev, in qed_iov_pci_cfg_info()
349 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); in qed_iov_pci_cfg_info()
350 pci_read_config_word(cdev->pdev, in qed_iov_pci_cfg_info()
351 pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); in qed_iov_pci_cfg_info()
353 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); in qed_iov_pci_cfg_info()
354 if (iov->num_vfs) { in qed_iov_pci_cfg_info()
357 "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); in qed_iov_pci_cfg_info()
358 iov->num_vfs = 0; in qed_iov_pci_cfg_info()
361 pci_read_config_word(cdev->pdev, in qed_iov_pci_cfg_info()
362 pos + PCI_SRIOV_VF_OFFSET, &iov->offset); in qed_iov_pci_cfg_info()
364 pci_read_config_word(cdev->pdev, in qed_iov_pci_cfg_info()
365 pos + PCI_SRIOV_VF_STRIDE, &iov->stride); in qed_iov_pci_cfg_info()
367 pci_read_config_word(cdev->pdev, in qed_iov_pci_cfg_info()
368 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); in qed_iov_pci_cfg_info()
370 pci_read_config_dword(cdev->pdev, in qed_iov_pci_cfg_info()
371 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); in qed_iov_pci_cfg_info()
373 pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); in qed_iov_pci_cfg_info()
375 pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); in qed_iov_pci_cfg_info()
380 iov->nres, in qed_iov_pci_cfg_info()
381 iov->cap, in qed_iov_pci_cfg_info()
382 iov->ctrl, in qed_iov_pci_cfg_info()
383 iov->total_vfs, in qed_iov_pci_cfg_info()
384 iov->initial_vfs, in qed_iov_pci_cfg_info()
385 iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); in qed_iov_pci_cfg_info()
388 if (iov->num_vfs > NUM_OF_VFS(cdev) || in qed_iov_pci_cfg_info()
389 iov->total_vfs > NUM_OF_VFS(cdev)) { in qed_iov_pci_cfg_info()
396 iov->num_vfs); in qed_iov_pci_cfg_info()
398 iov->num_vfs = 0; in qed_iov_pci_cfg_info()
399 iov->total_vfs = 0; in qed_iov_pci_cfg_info()
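qed_iov_pci_cfg_info() pulls total_vfs, initial_vfs, offset, stride and friends straight out of the PCIe SR-IOV extended capability with pci_read_config_word()/dword(), then zeroes the counts if they exceed what the device supports. From userspace the same numbers surface through sysfs; a small sketch reading them (the PCI address below is a placeholder):

    #include <stdio.h>

    /* Read one integer attribute from a PCI device's sysfs directory. */
    static int read_pci_attr(const char *bdf, const char *attr, long *out)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/%s", bdf, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%ld", out) != 1)
            *out = -1;
        fclose(f);
        return 0;
    }

    int main(void)
    {
        const char *bdf = "0000:01:00.0";   /* placeholder PF address */
        long total = 0, num = 0;

        if (read_pci_attr(bdf, "sriov_totalvfs", &total) ||
            read_pci_attr(bdf, "sriov_numvfs", &num)) {
            fprintf(stderr, "device has no SR-IOV capability exposed\n");
            return 1;
        }
        printf("total_vfs=%ld num_vfs=%ld\n", total, num);
        return 0;
    }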
407 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; in qed_iov_setup_vfdb()
408 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; in qed_iov_setup_vfdb()
415 memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); in qed_iov_setup_vfdb()
417 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; in qed_iov_setup_vfdb()
418 req_p = p_iov_info->mbx_msg_phys_addr; in qed_iov_setup_vfdb()
419 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; in qed_iov_setup_vfdb()
420 rply_p = p_iov_info->mbx_reply_phys_addr; in qed_iov_setup_vfdb()
421 p_bulletin_virt = p_iov_info->p_bulletins; in qed_iov_setup_vfdb()
422 bulletin_p = p_iov_info->bulletins_phys; in qed_iov_setup_vfdb()
429 for (idx = 0; idx < p_iov->total_vfs; idx++) { in qed_iov_setup_vfdb()
430 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; in qed_iov_setup_vfdb()
433 vf->vf_mbx.req_virt = p_req_virt_addr + idx; in qed_iov_setup_vfdb()
434 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); in qed_iov_setup_vfdb()
435 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; in qed_iov_setup_vfdb()
436 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); in qed_iov_setup_vfdb()
438 vf->state = VF_STOPPED; in qed_iov_setup_vfdb()
439 vf->b_init = false; in qed_iov_setup_vfdb()
441 vf->bulletin.phys = idx * sizeof(struct qed_bulletin_content) + bulletin_p; in qed_iov_setup_vfdb()
444 vf->bulletin.p_virt = p_bulletin_virt + idx; in qed_iov_setup_vfdb()
445 vf->bulletin.size = sizeof(struct qed_bulletin_content); in qed_iov_setup_vfdb()
447 vf->relative_vf_id = idx; in qed_iov_setup_vfdb()
448 vf->abs_vf_id = idx + p_iov->first_vf_in_pf; in qed_iov_setup_vfdb()
449 concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); in qed_iov_setup_vfdb()
450 vf->concrete_fid = concrete; in qed_iov_setup_vfdb()
451 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | in qed_iov_setup_vfdb()
452 (vf->abs_vf_id << 8); in qed_iov_setup_vfdb()
453 vf->vport_id = idx + 1; in qed_iov_setup_vfdb()
455 vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; in qed_iov_setup_vfdb()
456 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; in qed_iov_setup_vfdb()
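Note how qed_iov_setup_vfdb() derives every VF identifier arithmetically: abs_vf_id is the relative index plus first_vf_in_pf, and opaque_fid packs that absolute id into the byte above the PF's own low FID byte. A tiny worked example of the packing, with illustrative values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t pf_opaque_fid = 0x0001;  /* illustrative PF FID */
        uint8_t  first_vf_in_pf = 8;      /* illustrative offset */

        for (uint8_t idx = 0; idx < 2; idx++) {
            uint8_t  abs_vf_id = idx + first_vf_in_pf;
            uint16_t opaque_fid = (pf_opaque_fid & 0xff) | (abs_vf_id << 8);

            printf("vf %u: abs_id=%u opaque_fid=0x%04x vport=%u\n",
                   idx, abs_vf_id, opaque_fid, idx + 1);
        }
        return 0;
    }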
462 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; in qed_iov_allocate_vfdb()
466 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; in qed_iov_allocate_vfdb()
471 /* Allocate PF Mailbox buffer (per-VF) */ in qed_iov_allocate_vfdb()
472 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; in qed_iov_allocate_vfdb()
473 p_v_addr = &p_iov_info->mbx_msg_virt_addr; in qed_iov_allocate_vfdb()
474 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_allocate_vfdb()
475 p_iov_info->mbx_msg_size, in qed_iov_allocate_vfdb()
476 &p_iov_info->mbx_msg_phys_addr, in qed_iov_allocate_vfdb()
479 return -ENOMEM; in qed_iov_allocate_vfdb()
481 /* Allocate PF Mailbox Reply buffer (per-VF) */ in qed_iov_allocate_vfdb()
482 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; in qed_iov_allocate_vfdb()
483 p_v_addr = &p_iov_info->mbx_reply_virt_addr; in qed_iov_allocate_vfdb()
484 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_allocate_vfdb()
485 p_iov_info->mbx_reply_size, in qed_iov_allocate_vfdb()
486 &p_iov_info->mbx_reply_phys_addr, in qed_iov_allocate_vfdb()
489 return -ENOMEM; in qed_iov_allocate_vfdb()
491 p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * in qed_iov_allocate_vfdb()
493 p_v_addr = &p_iov_info->p_bulletins; in qed_iov_allocate_vfdb()
494 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_allocate_vfdb()
495 p_iov_info->bulletins_size, in qed_iov_allocate_vfdb()
496 &p_iov_info->bulletins_phys, in qed_iov_allocate_vfdb()
499 return -ENOMEM; in qed_iov_allocate_vfdb()
504 p_iov_info->mbx_msg_virt_addr, in qed_iov_allocate_vfdb()
505 (u64)p_iov_info->mbx_msg_phys_addr, in qed_iov_allocate_vfdb()
506 p_iov_info->mbx_reply_virt_addr, in qed_iov_allocate_vfdb()
507 (u64)p_iov_info->mbx_reply_phys_addr, in qed_iov_allocate_vfdb()
508 p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); in qed_iov_allocate_vfdb()
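qed_iov_allocate_vfdb() performs one dma_alloc_coherent() per buffer type, sized for all VFs at once; setup then carves out per-VF slices by advancing the virtual pointer by index and the DMA address by index times the element size, in lockstep. A userspace model of that slicing, with malloc standing in for the coherent allocation and an invented bus address:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mbx_msg { uint8_t raw[64]; };   /* stand-in for union vfpf_tlvs */

    int main(void)
    {
        int num_vfs = 4;
        struct mbx_msg *virt = calloc(num_vfs, sizeof(*virt));
        uint64_t phys = 0x10000000;        /* pretend DMA base address */

        if (!virt)
            return 1;

        for (int idx = 0; idx < num_vfs; idx++) {
            struct mbx_msg *req_virt = virt + idx;
            uint64_t req_phys = phys + idx * sizeof(struct mbx_msg);

            printf("vf %d: virt=%p phys=0x%llx\n",
                   idx, (void *)req_virt, (unsigned long long)req_phys);
        }
        free(virt);
        return 0;
    }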
515 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; in qed_iov_free_vfdb()
517 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) in qed_iov_free_vfdb()
518 dma_free_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_free_vfdb()
519 p_iov_info->mbx_msg_size, in qed_iov_free_vfdb()
520 p_iov_info->mbx_msg_virt_addr, in qed_iov_free_vfdb()
521 p_iov_info->mbx_msg_phys_addr); in qed_iov_free_vfdb()
523 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) in qed_iov_free_vfdb()
524 dma_free_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_free_vfdb()
525 p_iov_info->mbx_reply_size, in qed_iov_free_vfdb()
526 p_iov_info->mbx_reply_virt_addr, in qed_iov_free_vfdb()
527 p_iov_info->mbx_reply_phys_addr); in qed_iov_free_vfdb()
529 if (p_iov_info->p_bulletins) in qed_iov_free_vfdb()
530 dma_free_coherent(&p_hwfn->cdev->pdev->dev, in qed_iov_free_vfdb()
531 p_iov_info->bulletins_size, in qed_iov_free_vfdb()
532 p_iov_info->p_bulletins, in qed_iov_free_vfdb()
533 p_iov_info->bulletins_phys); in qed_iov_free_vfdb()
542 "No SR-IOV - no need for IOV db\n"); in qed_iov_alloc()
548 return -ENOMEM; in qed_iov_alloc()
550 p_hwfn->pf_iov_info = p_sriov; in qed_iov_alloc()
572 kfree(p_hwfn->pf_iov_info); in qed_iov_free()
578 kfree(cdev->p_iov_info); in qed_iov_free_hw_info()
579 cdev->p_iov_info = NULL; in qed_iov_free_hw_info()
584 struct qed_dev *cdev = p_hwfn->cdev; in qed_iov_hw_info()
591 if (IS_VF(p_hwfn->cdev)) in qed_iov_hw_info()
595 pos = pci_find_ext_capability(p_hwfn->cdev->pdev, in qed_iov_hw_info()
603 cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); in qed_iov_hw_info()
604 if (!cdev->p_iov_info) in qed_iov_hw_info()
605 return -ENOMEM; in qed_iov_hw_info()
607 cdev->p_iov_info->pos = pos; in qed_iov_hw_info()
615 * de-allocate the struct. in qed_iov_hw_info()
617 if (!cdev->p_iov_info->total_vfs) { in qed_iov_hw_info()
620 kfree(cdev->p_iov_info); in qed_iov_hw_info()
621 cdev->p_iov_info = NULL; in qed_iov_hw_info()
626 * - If ARI is supported [likely], offset - (16 - pf_id) would in qed_iov_hw_info()
629 * - If !ARI, VFs would start on next device. in qed_iov_hw_info()
630 * so offset - (256 - pf_id) would provide the number. in qed_iov_hw_info()
631 * Utilize the fact that (256 - pf_id) is achieved only by later in qed_iov_hw_info()
635 if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { in qed_iov_hw_info()
636 u32 first = p_hwfn->cdev->p_iov_info->offset + in qed_iov_hw_info()
637 p_hwfn->abs_pf_id - 16; in qed_iov_hw_info()
639 cdev->p_iov_info->first_vf_in_pf = first; in qed_iov_hw_info()
642 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; in qed_iov_hw_info()
644 u32 first = p_hwfn->cdev->p_iov_info->offset + in qed_iov_hw_info()
645 p_hwfn->abs_pf_id - 256; in qed_iov_hw_info()
647 cdev->p_iov_info->first_vf_in_pf = first; in qed_iov_hw_info()
652 cdev->p_iov_info->first_vf_in_pf); in qed_iov_hw_info()
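The comment preserved above explains the first_vf_in_pf arithmetic: with ARI, every function of the device shares the PF's bus, so the first VF is offset + abs_pf_id - 16; without ARI the VFs start on the next bus, making the subtrahend 256. A standalone sketch of that computation (MAX_NUM_VFS_BB and the engine flag are schematic stand-ins):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_NUM_VFS_BB 120   /* placeholder for the BB per-path VF limit */

    static uint32_t first_vf_in_pf(uint16_t offset, uint8_t abs_pf_id,
                                   int second_engine_bb)
    {
        uint32_t first;

        if (offset < (256 - abs_pf_id)) {
            /* ARI: VFs live on the PF's own bus. */
            first = offset + abs_pf_id - 16;
            if (second_engine_bb)
                first -= MAX_NUM_VFS_BB;   /* engine 1 owns the upper half */
        } else {
            /* No ARI: VFs start on the next device/bus. */
            first = offset + abs_pf_id - 256;
        }
        return first;
    }

    int main(void)
    {
        printf("first VF = %u\n", first_vf_in_pf(16, 0, 0));
        return 0;
    }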
661 if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || in _qed_iov_pf_sanity_check()
684 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; in qed_iov_set_vf_to_disable()
690 vf->to_disable = to_disable; in qed_iov_set_vf_to_disable()
701 for (i = 0; i < cdev->p_iov_info->total_vfs; i++) in qed_iov_set_vfs_to_disable()
718 /* Set VF masks and configuration - pretend */ in qed_iov_vf_igu_reset()
719 qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); in qed_iov_vf_igu_reset()
724 qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); in qed_iov_vf_igu_reset()
727 for (i = 0; i < vf->num_sbs; i++) in qed_iov_vf_igu_reset()
729 vf->igu_sbs[i], in qed_iov_vf_igu_reset()
730 vf->opaque_fid, true); in qed_iov_vf_igu_reset()
739 qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); in qed_iov_vf_igu_set_int()
751 qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); in qed_iov_vf_igu_set_int()
761 /* For AH onward, configuration is per-PF. Find maximum of all in qed_iov_enable_vf_access_msix()
764 if (!QED_IS_BB(p_hwfn->cdev)) { in qed_iov_enable_vf_access_msix()
772 current_max = max_t(u8, current_max, p_vf->num_sbs); in qed_iov_enable_vf_access_msix()
790 /* It's possible VF was previously considered malicious - in qed_iov_enable_vf_access()
793 vf->b_malicious = false; in qed_iov_enable_vf_access()
795 if (vf->to_disable) in qed_iov_enable_vf_access()
801 vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); in qed_iov_enable_vf_access()
808 vf->abs_vf_id, vf->num_sbs); in qed_iov_enable_vf_access()
812 qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); in qed_iov_enable_vf_access()
814 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); in qed_iov_enable_vf_access()
817 qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, in qed_iov_enable_vf_access()
818 p_hwfn->hw_info.hw_mode); in qed_iov_enable_vf_access()
821 qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); in qed_iov_enable_vf_access()
823 vf->state = VF_FREE; in qed_iov_enable_vf_access()
829 * qed_iov_config_perm_table() - Configure the permission zone table.
849 for (qid = 0; qid < vf->num_rxqs; qid++) { in qed_iov_config_perm_table()
850 qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, in qed_iov_config_perm_table()
854 val = enable ? (vf->abs_vf_id | BIT(8)) : 0; in qed_iov_config_perm_table()
863 /* Reset vf in IGU - interrupts are still disabled */ in qed_iov_enable_vf_traffic()
881 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) in qed_iov_alloc_vf_igu_sbs()
882 num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; in qed_iov_alloc_vf_igu_sbs()
883 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; in qed_iov_alloc_vf_igu_sbs()
885 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); in qed_iov_alloc_vf_igu_sbs()
891 vf->igu_sbs[qid] = p_block->igu_sb_id; in qed_iov_alloc_vf_igu_sbs()
892 p_block->status &= ~QED_IGU_STATUS_FREE; in qed_iov_alloc_vf_igu_sbs()
897 sizeof(u32) * p_block->igu_sb_id, val); in qed_iov_alloc_vf_igu_sbs()
901 p_hwfn->rel_pf_id, vf->abs_vf_id, 1); in qed_iov_alloc_vf_igu_sbs()
906 p_block->igu_sb_id * sizeof(u64), 2, NULL); in qed_iov_alloc_vf_igu_sbs()
909 vf->num_sbs = (u8)num_rx_queues; in qed_iov_alloc_vf_igu_sbs()
911 return vf->num_sbs; in qed_iov_alloc_vf_igu_sbs()
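qed_iov_alloc_vf_igu_sbs() first clamps the request to the free IOV status-block count, then walks the IGU map claiming one block per Rx queue and clearing its FREE status bit. A minimal free-pool model of the same claim loop:

    #include <stdio.h>

    #define POOL_SIZE 16
    #define STATUS_FREE 0x1

    static unsigned char status[POOL_SIZE];

    /* Claim up to num_req blocks; returns how many were actually assigned. */
    static int alloc_sbs(int num_req, int *assigned, int *free_cnt)
    {
        int got = 0;

        if (num_req > *free_cnt)
            num_req = *free_cnt;
        *free_cnt -= num_req;

        for (int id = 0; id < POOL_SIZE && got < num_req; id++) {
            if (!(status[id] & STATUS_FREE))
                continue;
            status[id] &= ~STATUS_FREE;
            assigned[got++] = id;
        }
        return got;
    }

    int main(void)
    {
        int free_cnt = POOL_SIZE, sbs[POOL_SIZE];

        for (int i = 0; i < POOL_SIZE; i++)
            status[i] = STATUS_FREE;

        int n = alloc_sbs(5, sbs, &free_cnt);
        printf("got %d blocks, %d left free\n", n, free_cnt);
        return 0;
    }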
918 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; in qed_iov_free_vf_igu_sbs()
923 for (idx = 0; idx < vf->num_sbs; idx++) { in qed_iov_free_vf_igu_sbs()
924 igu_id = vf->igu_sbs[idx]; in qed_iov_free_vf_igu_sbs()
931 p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; in qed_iov_free_vf_igu_sbs()
932 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; in qed_iov_free_vf_igu_sbs()
935 vf->num_sbs = 0; in qed_iov_free_vf_igu_sbs()
952 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_set_link()
953 p_bulletin->req_autoneg = params->speed.autoneg; in qed_iov_set_link()
954 p_bulletin->req_adv_speed = params->speed.advertised_speeds; in qed_iov_set_link()
955 p_bulletin->req_forced_speed = params->speed.forced_speed; in qed_iov_set_link()
956 p_bulletin->req_autoneg_pause = params->pause.autoneg; in qed_iov_set_link()
957 p_bulletin->req_forced_rx = params->pause.forced_rx; in qed_iov_set_link()
958 p_bulletin->req_forced_tx = params->pause.forced_tx; in qed_iov_set_link()
959 p_bulletin->req_loopback = params->loopback_mode; in qed_iov_set_link()
961 p_bulletin->link_up = link->link_up; in qed_iov_set_link()
962 p_bulletin->speed = link->speed; in qed_iov_set_link()
963 p_bulletin->full_duplex = link->full_duplex; in qed_iov_set_link()
964 p_bulletin->autoneg = link->an; in qed_iov_set_link()
965 p_bulletin->autoneg_complete = link->an_complete; in qed_iov_set_link()
966 p_bulletin->parallel_detection = link->parallel_detection; in qed_iov_set_link()
967 p_bulletin->pfc_enabled = link->pfc_enabled; in qed_iov_set_link()
968 p_bulletin->partner_adv_speed = link->partner_adv_speed; in qed_iov_set_link()
969 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; in qed_iov_set_link()
970 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; in qed_iov_set_link()
971 p_bulletin->partner_adv_pause = link->partner_adv_pause; in qed_iov_set_link()
972 p_bulletin->sfp_tx_fault = link->sfp_tx_fault; in qed_iov_set_link()
974 p_bulletin->capability_speed = p_caps->speed_capabilities; in qed_iov_set_link()
991 vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); in qed_iov_init_hw_for_vf()
994 return -EINVAL; in qed_iov_init_hw_for_vf()
997 if (vf->b_init) { in qed_iov_init_hw_for_vf()
999 p_params->rel_vf_id); in qed_iov_init_hw_for_vf()
1000 return -EINVAL; in qed_iov_init_hw_for_vf()
1004 for (i = 0; i < p_params->num_queues; i++) { in qed_iov_init_hw_for_vf()
1007 FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; in qed_iov_init_hw_for_vf()
1009 qid = p_params->req_rx_queue[i]; in qed_iov_init_hw_for_vf()
1014 p_params->rel_vf_id, in qed_iov_init_hw_for_vf()
1016 return -EINVAL; in qed_iov_init_hw_for_vf()
1019 qid = p_params->req_tx_queue[i]; in qed_iov_init_hw_for_vf()
1023 qid, p_params->rel_vf_id, max_vf_qzone); in qed_iov_init_hw_for_vf()
1024 return -EINVAL; in qed_iov_init_hw_for_vf()
1032 p_params->rel_vf_id, qid, i); in qed_iov_init_hw_for_vf()
1039 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", in qed_iov_init_hw_for_vf()
1040 vf->relative_vf_id, p_params->num_queues, (u16)cids); in qed_iov_init_hw_for_vf()
1041 num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); in qed_iov_init_hw_for_vf()
1048 return -ENOMEM; in qed_iov_init_hw_for_vf()
1052 vf->num_rxqs = num_of_vf_avaiable_chains; in qed_iov_init_hw_for_vf()
1053 vf->num_txqs = num_of_vf_avaiable_chains; in qed_iov_init_hw_for_vf()
1055 for (i = 0; i < vf->num_rxqs; i++) { in qed_iov_init_hw_for_vf()
1056 struct qed_vf_queue *p_queue = &vf->vf_queues[i]; in qed_iov_init_hw_for_vf()
1058 p_queue->fw_rx_qid = p_params->req_rx_queue[i]; in qed_iov_init_hw_for_vf()
1059 p_queue->fw_tx_qid = p_params->req_tx_queue[i]; in qed_iov_init_hw_for_vf()
1062 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", in qed_iov_init_hw_for_vf()
1063 vf->relative_vf_id, i, vf->igu_sbs[i], in qed_iov_init_hw_for_vf()
1064 p_queue->fw_rx_qid, p_queue->fw_tx_qid); in qed_iov_init_hw_for_vf()
1073 qed_iov_set_link(p_hwfn, p_params->rel_vf_id, in qed_iov_init_hw_for_vf()
1078 vf->b_init = true; in qed_iov_init_hw_for_vf()
1081 p_hwfn->cdev->p_iov_info->num_vfs++; in qed_iov_init_hw_for_vf()
1098 return -EINVAL; in qed_iov_release_hw_for_vf()
1101 if (vf->bulletin.p_virt) in qed_iov_release_hw_for_vf()
1102 memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); in qed_iov_release_hw_for_vf()
1104 memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); in qed_iov_release_hw_for_vf()
1107 * that when VFs are re-enabled they get the actual in qed_iov_release_hw_for_vf()
1116 memset(&vf->acquire, 0, sizeof(vf->acquire)); in qed_iov_release_hw_for_vf()
1119 * vf-close, however, we could get here without going through vf_close in qed_iov_release_hw_for_vf()
1127 vf->num_rxqs = 0; in qed_iov_release_hw_for_vf()
1128 vf->num_txqs = 0; in qed_iov_release_hw_for_vf()
1131 if (vf->b_init) { in qed_iov_release_hw_for_vf()
1132 vf->b_init = false; in qed_iov_release_hw_for_vf()
1135 p_hwfn->cdev->p_iov_info->num_vfs--; in qed_iov_release_hw_for_vf()
1151 tl->type = type; in qed_add_tlv()
1152 tl->length = length; in qed_add_tlv()
1158 return *offset - length; in qed_add_tlv()
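qed_add_tlv() writes a {type, length} header at the current offset, advances the offset by the TLV's full length, and returns a pointer to the TLV it just placed so the caller can fill in the body; every reply list is then terminated with CHANNEL_TLV_LIST_END. A self-contained sketch of that builder over a flat buffer (type ids are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct channel_tlv {
        uint16_t type;
        uint16_t length;   /* total length, header included */
    };

    enum { TLV_LIST_END = 0, TLV_EXAMPLE = 1 };   /* illustrative type ids */

    static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
    {
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;
        *offset += length;
        return *offset - length;   /* start of the TLV just written */
    }

    int main(void)
    {
        uint8_t buf[128];
        uint8_t *offset = buf;

        memset(buf, 0, sizeof(buf));
        struct channel_tlv *t =
            add_tlv(&offset, TLV_EXAMPLE, sizeof(*t) + 12 /* body bytes */);
        add_tlv(&offset, TLV_LIST_END, sizeof(struct channel_tlv));

        printf("first TLV type %u len %u, %zu bytes used\n",
               t->type, t->length, (size_t)(offset - buf));
        return 0;
    }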
1173 i, tlv->type, tlv->length); in qed_dp_tlv_list()
1175 if (tlv->type == CHANNEL_TLV_LIST_END) in qed_dp_tlv_list()
1178 /* Validate entry - protect against malicious VFs */ in qed_dp_tlv_list()
1179 if (!tlv->length) { in qed_dp_tlv_list()
1184 total_length += tlv->length; in qed_dp_tlv_list()
1200 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_send_response()
1204 mbx->reply_virt->default_resp.hdr.status = status; in qed_iov_send_response()
1206 qed_dp_tlv_list(p_hwfn, mbx->reply_virt); in qed_iov_send_response()
1208 eng_vf_id = p_vf->abs_vf_id; in qed_iov_send_response()
1214 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), in qed_iov_send_response()
1215 mbx->req_virt->first_tlv.reply_address + in qed_iov_send_response()
1217 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, in qed_iov_send_response()
1222 * channel would be re-set to ready prior to that. in qed_iov_send_response()
1228 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, in qed_iov_send_response()
1229 mbx->req_virt->first_tlv.reply_address, in qed_iov_send_response()
1267 memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); in qed_iov_prep_vp_update_resp_tlvs()
1268 p_mbx->offset = (u8 *)p_mbx->reply_virt; in qed_iov_prep_vp_update_resp_tlvs()
1272 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); in qed_iov_prep_vp_update_resp_tlvs()
1279 resp = qed_add_tlv(p_hwfn, &p_mbx->offset, in qed_iov_prep_vp_update_resp_tlvs()
1283 resp->hdr.status = status; in qed_iov_prep_vp_update_resp_tlvs()
1285 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; in qed_iov_prep_vp_update_resp_tlvs()
1289 "VF[%d] - vport_update response: TLV %d, status %02x\n", in qed_iov_prep_vp_update_resp_tlvs()
1290 p_vf->relative_vf_id, in qed_iov_prep_vp_update_resp_tlvs()
1291 qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); in qed_iov_prep_vp_update_resp_tlvs()
1296 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, in qed_iov_prep_vp_update_resp_tlvs()
1307 struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; in qed_iov_prepare_resp()
1309 mbx->offset = (u8 *)mbx->reply_virt; in qed_iov_prepare_resp()
1311 qed_add_tlv(p_hwfn, &mbx->offset, type, length); in qed_iov_prepare_resp()
1312 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, in qed_iov_prepare_resp()
1329 return &vf->p_vf_info; in qed_iov_get_public_vf_info()
1342 eth_zero_addr(vf_info->mac); in qed_iov_clean_vf()
1344 vf_info->rx_accept_mode = 0; in qed_iov_clean_vf()
1345 vf_info->tx_accept_mode = 0; in qed_iov_clean_vf()
1353 p_vf->vf_bulletin = 0; in qed_iov_vf_cleanup()
1354 p_vf->vport_instance = 0; in qed_iov_vf_cleanup()
1355 p_vf->configured_features = 0; in qed_iov_vf_cleanup()
1358 p_vf->num_rxqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1359 p_vf->num_txqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1361 p_vf->num_active_rxqs = 0; in qed_iov_vf_cleanup()
1364 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_vf_cleanup()
1367 if (!p_queue->cids[j].p_cid) in qed_iov_vf_cleanup()
1371 p_queue->cids[j].p_cid); in qed_iov_vf_cleanup()
1372 p_queue->cids[j].p_cid = NULL; in qed_iov_vf_cleanup()
1376 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); in qed_iov_vf_cleanup()
1377 memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); in qed_iov_vf_cleanup()
1378 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); in qed_iov_vf_cleanup()
1399 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; in qed_iov_vf_mbx_acquire_resc_cids()
1400 u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - qed_db_addr_vf(0, DQ_DEMS_LEGACY); in qed_iov_vf_mbx_acquire_resc_cids()
1404 p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); in qed_iov_vf_mbx_acquire_resc_cids()
1410 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1418 if (p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1424 if (p_hwfn->cdev->num_hwfns > 1) in qed_iov_vf_mbx_acquire_resc_cids()
1431 p_resp->num_cids = min_t(u8, p_resp->num_cids, in qed_iov_vf_mbx_acquire_resc_cids()
1444 p_resp->num_rxqs = p_vf->num_rxqs; in qed_iov_vf_mbx_acquire_resc()
1445 p_resp->num_txqs = p_vf->num_txqs; in qed_iov_vf_mbx_acquire_resc()
1446 p_resp->num_sbs = p_vf->num_sbs; in qed_iov_vf_mbx_acquire_resc()
1448 for (i = 0; i < p_resp->num_sbs; i++) { in qed_iov_vf_mbx_acquire_resc()
1449 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; in qed_iov_vf_mbx_acquire_resc()
1450 p_resp->hw_sbs[i].sb_qid = 0; in qed_iov_vf_mbx_acquire_resc()
1456 for (i = 0; i < p_resp->num_rxqs; i++) { in qed_iov_vf_mbx_acquire_resc()
1457 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, in qed_iov_vf_mbx_acquire_resc()
1458 (u16 *)&p_resp->hw_qid[i]); in qed_iov_vf_mbx_acquire_resc()
1459 p_resp->cid[i] = i; in qed_iov_vf_mbx_acquire_resc()
1463 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, in qed_iov_vf_mbx_acquire_resc()
1464 p_req->num_mac_filters); in qed_iov_vf_mbx_acquire_resc()
1465 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, in qed_iov_vf_mbx_acquire_resc()
1466 p_req->num_vlan_filters); in qed_iov_vf_mbx_acquire_resc()
1473 p_resp->num_mc_filters = QED_MAX_MC_ADDRS; in qed_iov_vf_mbx_acquire_resc()
1476 if (p_resp->num_rxqs < p_req->num_rxqs || in qed_iov_vf_mbx_acquire_resc()
1477 p_resp->num_txqs < p_req->num_txqs || in qed_iov_vf_mbx_acquire_resc()
1478 p_resp->num_sbs < p_req->num_sbs || in qed_iov_vf_mbx_acquire_resc()
1479 p_resp->num_mac_filters < p_req->num_mac_filters || in qed_iov_vf_mbx_acquire_resc()
1480 p_resp->num_vlan_filters < p_req->num_vlan_filters || in qed_iov_vf_mbx_acquire_resc()
1481 p_resp->num_mc_filters < p_req->num_mc_filters || in qed_iov_vf_mbx_acquire_resc()
1482 p_resp->num_cids < p_req->num_cids) { in qed_iov_vf_mbx_acquire_resc()
1485 …"VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] … in qed_iov_vf_mbx_acquire_resc()
1486 p_vf->abs_vf_id, in qed_iov_vf_mbx_acquire_resc()
1487 p_req->num_rxqs, in qed_iov_vf_mbx_acquire_resc()
1488 p_resp->num_rxqs, in qed_iov_vf_mbx_acquire_resc()
1489 p_req->num_txqs, in qed_iov_vf_mbx_acquire_resc()
1490 p_resp->num_txqs, in qed_iov_vf_mbx_acquire_resc()
1491 p_req->num_sbs, in qed_iov_vf_mbx_acquire_resc()
1492 p_resp->num_sbs, in qed_iov_vf_mbx_acquire_resc()
1493 p_req->num_mac_filters, in qed_iov_vf_mbx_acquire_resc()
1494 p_resp->num_mac_filters, in qed_iov_vf_mbx_acquire_resc()
1495 p_req->num_vlan_filters, in qed_iov_vf_mbx_acquire_resc()
1496 p_resp->num_vlan_filters, in qed_iov_vf_mbx_acquire_resc()
1497 p_req->num_mc_filters, in qed_iov_vf_mbx_acquire_resc()
1498 p_resp->num_mc_filters, in qed_iov_vf_mbx_acquire_resc()
1499 p_req->num_cids, p_resp->num_cids); in qed_iov_vf_mbx_acquire_resc()
1504 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_acquire_resc()
1506 (p_vf->acquire.vfdev_info.os_type == in qed_iov_vf_mbx_acquire_resc()
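The acquire flow above is an offer/accept negotiation: the PF clamps each requested resource with min_t() against what it pre-assigned, then reports "no resource" if any granted count still falls short of the request, with an exception carved out for legacy VF drivers. A compact sketch of the pattern for two resource kinds:

    #include <stdio.h>

    struct resc { int rxqs, macs; };

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Returns 0 on success, -1 when the grant can't cover the request. */
    static int negotiate(const struct resc *req, const struct resc *avail,
                         struct resc *resp)
    {
        resp->rxqs = MIN(req->rxqs, avail->rxqs);
        resp->macs = MIN(req->macs, avail->macs);

        if (resp->rxqs < req->rxqs || resp->macs < req->macs) {
            fprintf(stderr, "insufficient: rxq [%02x/%02x] mac [%02x/%02x]\n",
                    req->rxqs, resp->rxqs, req->macs, resp->macs);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct resc req = { 8, 4 }, avail = { 4, 4 }, resp;

        if (negotiate(&req, &avail, &resp))
            printf("VF should retry with the granted amounts\n");
        return 0;
    }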
1519 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + in qed_iov_vf_mbx_acquire_stats()
1522 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); in qed_iov_vf_mbx_acquire_stats()
1523 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + in qed_iov_vf_mbx_acquire_stats()
1526 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); in qed_iov_vf_mbx_acquire_stats()
1527 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + in qed_iov_vf_mbx_acquire_stats()
1530 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); in qed_iov_vf_mbx_acquire_stats()
1531 p_stats->tstats.address = 0; in qed_iov_vf_mbx_acquire_stats()
1532 p_stats->tstats.len = 0; in qed_iov_vf_mbx_acquire_stats()
1539 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_acquire()
1540 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; in qed_iov_vf_mbx_acquire()
1541 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; in qed_iov_vf_mbx_acquire()
1542 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; in qed_iov_vf_mbx_acquire()
1544 struct pf_vf_resc *resc = &resp->resc; in qed_iov_vf_mbx_acquire()
1550 * is supported - might be later overridden. This guarantees that in qed_iov_vf_mbx_acquire()
1553 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; in qed_iov_vf_mbx_acquire()
1554 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; in qed_iov_vf_mbx_acquire()
1556 if (vf->state != VF_FREE && vf->state != VF_STOPPED) { in qed_iov_vf_mbx_acquire()
1559 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", in qed_iov_vf_mbx_acquire()
1560 vf->abs_vf_id, vf->state); in qed_iov_vf_mbx_acquire()
1565 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { in qed_iov_vf_mbx_acquire()
1566 if (req->vfdev_info.capabilities & in qed_iov_vf_mbx_acquire()
1568 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; in qed_iov_vf_mbx_acquire()
1571 "VF[%d] is pre-fastpath HSI\n", in qed_iov_vf_mbx_acquire()
1572 vf->abs_vf_id); in qed_iov_vf_mbx_acquire()
1573 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; in qed_iov_vf_mbx_acquire()
1574 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; in qed_iov_vf_mbx_acquire()
1578 vf->abs_vf_id, in qed_iov_vf_mbx_acquire()
1579 req->vfdev_info.eth_fp_hsi_major, in qed_iov_vf_mbx_acquire()
1580 req->vfdev_info.eth_fp_hsi_minor, in qed_iov_vf_mbx_acquire()
1588 if ((p_hwfn->cdev->num_hwfns > 1) && in qed_iov_vf_mbx_acquire()
1589 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { in qed_iov_vf_mbx_acquire()
1592 vf->abs_vf_id); in qed_iov_vf_mbx_acquire()
1597 memcpy(&vf->acquire, req, sizeof(vf->acquire)); in qed_iov_vf_mbx_acquire()
1599 vf->opaque_fid = req->vfdev_info.opaque_fid; in qed_iov_vf_mbx_acquire()
1601 vf->vf_bulletin = req->bulletin_addr; in qed_iov_vf_mbx_acquire()
1602 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? in qed_iov_vf_mbx_acquire()
1603 vf->bulletin.size : req->bulletin_size; in qed_iov_vf_mbx_acquire()
1606 pfdev_info->chip_num = p_hwfn->cdev->chip_num; in qed_iov_vf_mbx_acquire()
1607 pfdev_info->db_size = 0; in qed_iov_vf_mbx_acquire()
1608 pfdev_info->indices_per_sb = PIS_PER_SB; in qed_iov_vf_mbx_acquire()
1610 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | in qed_iov_vf_mbx_acquire()
1612 if (p_hwfn->cdev->num_hwfns > 1) in qed_iov_vf_mbx_acquire()
1613 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; in qed_iov_vf_mbx_acquire()
1615 /* Share our ability to use multiple queue-ids only with VFs in qed_iov_vf_mbx_acquire()
1618 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) in qed_iov_vf_mbx_acquire()
1619 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; in qed_iov_vf_mbx_acquire()
1622 resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); in qed_iov_vf_mbx_acquire()
1624 qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); in qed_iov_vf_mbx_acquire()
1626 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); in qed_iov_vf_mbx_acquire()
1628 pfdev_info->fw_major = FW_MAJOR_VERSION; in qed_iov_vf_mbx_acquire()
1629 pfdev_info->fw_minor = FW_MINOR_VERSION; in qed_iov_vf_mbx_acquire()
1630 pfdev_info->fw_rev = FW_REVISION_VERSION; in qed_iov_vf_mbx_acquire()
1631 pfdev_info->fw_eng = FW_ENGINEERING_VERSION; in qed_iov_vf_mbx_acquire()
1636 pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, in qed_iov_vf_mbx_acquire()
1637 req->vfdev_info.eth_fp_hsi_minor); in qed_iov_vf_mbx_acquire()
1638 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; in qed_iov_vf_mbx_acquire()
1639 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); in qed_iov_vf_mbx_acquire()
1641 pfdev_info->dev_type = p_hwfn->cdev->type; in qed_iov_vf_mbx_acquire()
1642 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; in qed_iov_vf_mbx_acquire()
1648 &req->resc_request, resc); in qed_iov_vf_mbx_acquire()
1655 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); in qed_iov_vf_mbx_acquire()
1661 resp->bulletin_size = vf->bulletin.size; in qed_iov_vf_mbx_acquire()
1662 qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); in qed_iov_vf_mbx_acquire()
1666 … "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" in qed_iov_vf_mbx_acquire()
1667 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", in qed_iov_vf_mbx_acquire()
1668 vf->abs_vf_id, in qed_iov_vf_mbx_acquire()
1669 resp->pfdev_info.chip_num, in qed_iov_vf_mbx_acquire()
1670 resp->pfdev_info.db_size, in qed_iov_vf_mbx_acquire()
1671 resp->pfdev_info.indices_per_sb, in qed_iov_vf_mbx_acquire()
1672 resp->pfdev_info.capabilities, in qed_iov_vf_mbx_acquire()
1673 resc->num_rxqs, in qed_iov_vf_mbx_acquire()
1674 resc->num_txqs, in qed_iov_vf_mbx_acquire()
1675 resc->num_sbs, in qed_iov_vf_mbx_acquire()
1676 resc->num_mac_filters, in qed_iov_vf_mbx_acquire()
1677 resc->num_vlan_filters); in qed_iov_vf_mbx_acquire()
1678 vf->state = VF_ACQUIRED; in qed_iov_vf_mbx_acquire()
1692 if (val == p_vf->spoof_chk) { in __qed_iov_spoofchk_set()
1699 params.opaque_fid = p_vf->opaque_fid; in __qed_iov_spoofchk_set()
1700 params.vport_id = p_vf->vport_id; in __qed_iov_spoofchk_set()
1706 p_vf->spoof_chk = val; in __qed_iov_spoofchk_set()
1707 p_vf->req_spoofchk_val = p_vf->spoof_chk; in __qed_iov_spoofchk_set()
1713 val, p_vf->relative_vf_id); in __qed_iov_spoofchk_set()
1729 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_reconfigure_unicast_vlan()
1734 if (!p_vf->shadow_config.vlans[i].used) in qed_iov_reconfigure_unicast_vlan()
1738 filter.vlan = p_vf->shadow_config.vlans[i].vid; in qed_iov_reconfigure_unicast_vlan()
1741 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1742 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_reconfigure_unicast_vlan()
1747 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1762 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) in qed_iov_reconfigure_unicast_shadow()
1774 if (!p_vf->vport_instance) in qed_iov_configure_vport_forced()
1775 return -EINVAL; in qed_iov_configure_vport_forced()
1778 p_vf->p_vf_info.is_trusted_configured) { in qed_iov_configure_vport_forced()
1787 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1788 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); in qed_iov_configure_vport_forced()
1790 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1797 if (p_vf->p_vf_info.is_trusted_configured) in qed_iov_configure_vport_forced()
1798 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1801 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1814 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1815 filter.vlan = p_vf->bulletin.p_virt->pvid; in qed_iov_configure_vport_forced()
1820 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1828 /* Update the default-vlan & silent vlan stripping */ in qed_iov_configure_vport_forced()
1830 vport_update.opaque_fid = p_vf->opaque_fid; in qed_iov_configure_vport_forced()
1831 vport_update.vport_id = p_vf->vport_id; in qed_iov_configure_vport_forced()
1839 : p_vf->shadow_config.inner_vlan_removal; in qed_iov_configure_vport_forced()
1853 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_configure_vport_forced()
1869 p_cid->rel.queue_id); in qed_iov_configure_vport_forced()
1875 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; in qed_iov_configure_vport_forced()
1877 p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); in qed_iov_configure_vport_forced()
1894 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_start_vport()
1902 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); in qed_iov_vf_mbx_start_vport()
1904 DP_NOTICE(p_hwfn->cdev, in qed_iov_vf_mbx_start_vport()
1906 vf->relative_vf_id); in qed_iov_vf_mbx_start_vport()
1910 vf->state = VF_ENABLED; in qed_iov_vf_mbx_start_vport()
1911 start = &mbx->req_virt->start_vport; in qed_iov_vf_mbx_start_vport()
1916 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { in qed_iov_vf_mbx_start_vport()
1917 if (!start->sb_addr[sb_id]) { in qed_iov_vf_mbx_start_vport()
1920 vf->relative_vf_id, sb_id); in qed_iov_vf_mbx_start_vport()
1925 start->sb_addr[sb_id], in qed_iov_vf_mbx_start_vport()
1926 vf->igu_sbs[sb_id], vf->abs_vf_id, 1); in qed_iov_vf_mbx_start_vport()
1929 vf->mtu = start->mtu; in qed_iov_vf_mbx_start_vport()
1930 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; in qed_iov_vf_mbx_start_vport()
1936 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; in qed_iov_vf_mbx_start_vport()
1938 u8 vf_req = start->only_untagged; in qed_iov_vf_mbx_start_vport()
1940 vf_info->bulletin.p_virt->default_only_untagged = vf_req; in qed_iov_vf_mbx_start_vport()
1944 params.tpa_mode = start->tpa_mode; in qed_iov_vf_mbx_start_vport()
1945 params.remove_inner_vlan = start->inner_vlan_removal; in qed_iov_vf_mbx_start_vport()
1948 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; in qed_iov_vf_mbx_start_vport()
1950 params.concrete_fid = vf->concrete_fid; in qed_iov_vf_mbx_start_vport()
1951 params.opaque_fid = vf->opaque_fid; in qed_iov_vf_mbx_start_vport()
1952 params.vport_id = vf->vport_id; in qed_iov_vf_mbx_start_vport()
1953 params.max_buffers_per_cqe = start->max_buffers_per_cqe; in qed_iov_vf_mbx_start_vport()
1954 params.mtu = vf->mtu; in qed_iov_vf_mbx_start_vport()
1957 params.check_mac = !vf->p_vf_info.is_trusted_configured; in qed_iov_vf_mbx_start_vport()
1965 vf->vport_instance++; in qed_iov_vf_mbx_start_vport()
1970 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); in qed_iov_vf_mbx_start_vport()
1983 vf->vport_instance--; in qed_iov_vf_mbx_stop_vport()
1984 vf->spoof_chk = false; in qed_iov_vf_mbx_stop_vport()
1988 vf->b_malicious = true; in qed_iov_vf_mbx_stop_vport()
1990 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", in qed_iov_vf_mbx_stop_vport()
1991 vf->abs_vf_id); in qed_iov_vf_mbx_stop_vport()
1996 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); in qed_iov_vf_mbx_stop_vport()
2004 vf->configured_features = 0; in qed_iov_vf_mbx_stop_vport()
2005 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); in qed_iov_vf_mbx_stop_vport()
2017 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_start_rxq_resp()
2022 mbx->offset = (u8 *)mbx->reply_virt; in qed_iov_vf_mbx_start_rxq_resp()
2033 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, in qed_iov_vf_mbx_start_rxq_resp()
2035 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, in qed_iov_vf_mbx_start_rxq_resp()
2040 req = &mbx->req_virt->start_rxq; in qed_iov_vf_mbx_start_rxq_resp()
2041 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + in qed_iov_vf_mbx_start_rxq_resp()
2044 sizeof(struct eth_rx_prod_data) * req->rx_qid; in qed_iov_vf_mbx_start_rxq_resp()
2053 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_qid()
2057 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_qid()
2066 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, in qed_iov_vf_mbx_qid()
2071 p_vf->relative_vf_id); in qed_iov_vf_mbx_qid()
2076 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { in qed_iov_vf_mbx_qid()
2078 "VF[%02x]: Provided qid out-of-bounds %02x\n", in qed_iov_vf_mbx_qid()
2079 p_vf->relative_vf_id, p_qid_tlv->qid); in qed_iov_vf_mbx_qid()
2083 return p_qid_tlv->qid; in qed_iov_vf_mbx_qid()
2092 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_start_rxq()
2101 req = &mbx->req_virt->start_rxq; in qed_iov_vf_mbx_start_rxq()
2103 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, in qed_iov_vf_mbx_start_rxq()
2105 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) in qed_iov_vf_mbx_start_rxq()
2112 p_queue = &vf->vf_queues[req->rx_qid]; in qed_iov_vf_mbx_start_rxq()
2113 if (p_queue->cids[qid_usage_idx].p_cid) in qed_iov_vf_mbx_start_rxq()
2118 /* Acquire a new queue-cid */ in qed_iov_vf_mbx_start_rxq()
2120 params.queue_id = p_queue->fw_rx_qid; in qed_iov_vf_mbx_start_rxq()
2121 params.vport_id = vf->vport_id; in qed_iov_vf_mbx_start_rxq()
2122 params.stats_id = vf->abs_vf_id + 0x10; in qed_iov_vf_mbx_start_rxq()
2125 sb_dummy.igu_sb_id = req->hw_sb; in qed_iov_vf_mbx_start_rxq()
2127 params.sb_idx = req->sb_index; in qed_iov_vf_mbx_start_rxq()
2130 vf_params.vfid = vf->relative_vf_id; in qed_iov_vf_mbx_start_rxq()
2131 vf_params.vf_qid = (u8)req->rx_qid; in qed_iov_vf_mbx_start_rxq()
2134 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, in qed_iov_vf_mbx_start_rxq()
2145 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, in qed_iov_vf_mbx_start_rxq()
2146 req->rx_qid), 0); in qed_iov_vf_mbx_start_rxq()
2149 req->bd_max_bytes, in qed_iov_vf_mbx_start_rxq()
2150 req->rxq_addr, in qed_iov_vf_mbx_start_rxq()
2151 req->cqe_pbl_addr, req->cqe_pbl_size); in qed_iov_vf_mbx_start_rxq()
2156 p_queue->cids[qid_usage_idx].p_cid = p_cid; in qed_iov_vf_mbx_start_rxq()
2157 p_queue->cids[qid_usage_idx].b_is_tx = false; in qed_iov_vf_mbx_start_rxq()
2159 vf->num_active_rxqs++; in qed_iov_vf_mbx_start_rxq()
2173 p_resp->tunn_feature_mask = tunn_feature_mask; in qed_iov_pf_update_tun_response()
2174 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; in qed_iov_pf_update_tun_response()
2175 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; in qed_iov_pf_update_tun_response()
2176 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; in qed_iov_pf_update_tun_response()
2177 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; in qed_iov_pf_update_tun_response()
2178 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled; in qed_iov_pf_update_tun_response()
2179 p_resp->vxlan_clss = p_tun->vxlan.tun_cls; in qed_iov_pf_update_tun_response()
2180 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; in qed_iov_pf_update_tun_response()
2181 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; in qed_iov_pf_update_tun_response()
2182 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; in qed_iov_pf_update_tun_response()
2183 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; in qed_iov_pf_update_tun_response()
2184 p_resp->geneve_udp_port = p_tun->geneve_port.port; in qed_iov_pf_update_tun_response()
2185 p_resp->vxlan_udp_port = p_tun->vxlan_port.port; in qed_iov_pf_update_tun_response()
2193 if (p_req->tun_mode_update_mask & BIT(mask)) { in __qed_iov_pf_update_tun_param()
2194 p_tun->b_update_mode = true; in __qed_iov_pf_update_tun_param()
2196 if (p_req->tunn_mode & BIT(mask)) in __qed_iov_pf_update_tun_param()
2197 p_tun->b_mode_enabled = true; in __qed_iov_pf_update_tun_param()
2200 p_tun->tun_cls = tun_cls; in __qed_iov_pf_update_tun_param()
2211 p_port->b_update_port = true; in qed_iov_pf_update_tun_param()
2212 p_port->port = port; in qed_iov_pf_update_tun_param()
2223 if (p_req->tun_mode_update_mask || p_req->update_tun_cls || in qed_iov_pf_validate_tunn_param()
2224 p_req->update_geneve_port || p_req->update_vxlan_port) in qed_iov_pf_validate_tunn_param()
2232 if (tun->b_update_mode && !tun->b_mode_enabled) { in qed_pf_validate_tunn_mode()
2233 tun->b_update_mode = false; in qed_pf_validate_tunn_mode()
2234 *rc = -EINVAL; in qed_pf_validate_tunn_mode()
2243 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; in qed_pf_validate_modify_tunn_config()
2244 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; in qed_pf_validate_modify_tunn_config()
2246 void *cookie = p_hwfn->cdev->ops_cookie; in qed_pf_validate_modify_tunn_config()
2249 *tun_features = p_hwfn->cdev->tunn_feature_mask; in qed_pf_validate_modify_tunn_config()
2250 bultn_vxlan_port = tun->vxlan_port.port; in qed_pf_validate_modify_tunn_config()
2251 bultn_geneve_port = tun->geneve_port.port; in qed_pf_validate_modify_tunn_config()
2252 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); in qed_pf_validate_modify_tunn_config()
2253 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); in qed_pf_validate_modify_tunn_config()
2254 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); in qed_pf_validate_modify_tunn_config()
2255 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); in qed_pf_validate_modify_tunn_config()
2256 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); in qed_pf_validate_modify_tunn_config()
2258 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && in qed_pf_validate_modify_tunn_config()
2259 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || in qed_pf_validate_modify_tunn_config()
2260 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || in qed_pf_validate_modify_tunn_config()
2261 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || in qed_pf_validate_modify_tunn_config()
2262 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || in qed_pf_validate_modify_tunn_config()
2263 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { in qed_pf_validate_modify_tunn_config()
2264 tun_src->b_update_rx_cls = false; in qed_pf_validate_modify_tunn_config()
2265 tun_src->b_update_tx_cls = false; in qed_pf_validate_modify_tunn_config()
2266 rc = -EINVAL; in qed_pf_validate_modify_tunn_config()
2269 if (tun_src->vxlan_port.b_update_port) { in qed_pf_validate_modify_tunn_config()
2270 if (tun_src->vxlan_port.port == tun->vxlan_port.port) { in qed_pf_validate_modify_tunn_config()
2271 tun_src->vxlan_port.b_update_port = false; in qed_pf_validate_modify_tunn_config()
2274 bultn_vxlan_port = tun_src->vxlan_port.port; in qed_pf_validate_modify_tunn_config()
2278 if (tun_src->geneve_port.b_update_port) { in qed_pf_validate_modify_tunn_config()
2279 if (tun_src->geneve_port.port == tun->geneve_port.port) { in qed_pf_validate_modify_tunn_config()
2280 tun_src->geneve_port.b_update_port = false; in qed_pf_validate_modify_tunn_config()
2283 bultn_geneve_port = tun_src->geneve_port.port; in qed_pf_validate_modify_tunn_config()
2293 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); in qed_pf_validate_modify_tunn_config()
2302 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; in qed_iov_vf_mbx_update_tunn_param()
2303 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_update_tunn_param()
2312 mbx->offset = (u8 *)mbx->reply_virt; in qed_iov_vf_mbx_update_tunn_param()
2315 p_req = &mbx->req_virt->tunn_param_update; in qed_iov_vf_mbx_update_tunn_param()
2324 tunn.b_update_rx_cls = p_req->update_tun_cls; in qed_iov_vf_mbx_update_tunn_param()
2325 tunn.b_update_tx_cls = p_req->update_tun_cls; in qed_iov_vf_mbx_update_tunn_param()
2328 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, in qed_iov_vf_mbx_update_tunn_param()
2329 p_req->update_vxlan_port, in qed_iov_vf_mbx_update_tunn_param()
2330 p_req->vxlan_port); in qed_iov_vf_mbx_update_tunn_param()
2333 p_req->l2geneve_clss, in qed_iov_vf_mbx_update_tunn_param()
2334 p_req->update_geneve_port, in qed_iov_vf_mbx_update_tunn_param()
2335 p_req->geneve_port); in qed_iov_vf_mbx_update_tunn_param()
2338 p_req->ipgeneve_clss); in qed_iov_vf_mbx_update_tunn_param()
2340 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); in qed_iov_vf_mbx_update_tunn_param()
2342 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); in qed_iov_vf_mbx_update_tunn_param()
2363 geneve_port = p_tun->geneve_port.port; in qed_iov_vf_mbx_update_tunn_param()
2366 p_tun->vxlan_port.port, in qed_iov_vf_mbx_update_tunn_param()
2372 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, in qed_iov_vf_mbx_update_tunn_param()
2376 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, in qed_iov_vf_mbx_update_tunn_param()
2387 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_start_txq_resp()
2392 mbx->offset = (u8 *)mbx->reply_virt; in qed_iov_vf_mbx_start_txq_resp()
2398 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_start_txq_resp()
2407 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, in qed_iov_vf_mbx_start_txq_resp()
2409 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, in qed_iov_vf_mbx_start_txq_resp()
2414 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); in qed_iov_vf_mbx_start_txq_resp()
2425 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_start_txq()
2437 req = &mbx->req_virt->start_txq; in qed_iov_vf_mbx_start_txq()
2439 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, in qed_iov_vf_mbx_start_txq()
2441 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) in qed_iov_vf_mbx_start_txq()
2448 p_queue = &vf->vf_queues[req->tx_qid]; in qed_iov_vf_mbx_start_txq()
2449 if (p_queue->cids[qid_usage_idx].p_cid) in qed_iov_vf_mbx_start_txq()
2454 /* Acquire a new queue-cid */ in qed_iov_vf_mbx_start_txq()
2455 params.queue_id = p_queue->fw_tx_qid; in qed_iov_vf_mbx_start_txq()
2456 params.vport_id = vf->vport_id; in qed_iov_vf_mbx_start_txq()
2457 params.stats_id = vf->abs_vf_id + 0x10; in qed_iov_vf_mbx_start_txq()
2461 sb_dummy.igu_sb_id = req->hw_sb; in qed_iov_vf_mbx_start_txq()
2463 params.sb_idx = req->sb_index; in qed_iov_vf_mbx_start_txq()
2466 vf_params.vfid = vf->relative_vf_id; in qed_iov_vf_mbx_start_txq()
2467 vf_params.vf_qid = (u8)req->tx_qid; in qed_iov_vf_mbx_start_txq()
2471 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, in qed_iov_vf_mbx_start_txq()
2476 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); in qed_iov_vf_mbx_start_txq()
2478 req->pbl_addr, req->pbl_size, pq); in qed_iov_vf_mbx_start_txq()
2484 p_queue->cids[qid_usage_idx].p_cid = p_cid; in qed_iov_vf_mbx_start_txq()
2485 p_queue->cids[qid_usage_idx].b_is_tx = true; in qed_iov_vf_mbx_start_txq()
2486 cid = p_cid->cid; in qed_iov_vf_mbx_start_txq()
2505 vf->relative_vf_id, rxq_id, qid_usage_idx); in qed_iov_vf_stop_rxqs()
2506 return -EINVAL; in qed_iov_vf_stop_rxqs()
2509 p_queue = &vf->vf_queues[rxq_id]; in qed_iov_vf_stop_rxqs()
2511 /* We've validated the index and the existence of the active RXQ - in qed_iov_vf_stop_rxqs()
2514 if (!p_queue->cids[qid_usage_idx].p_cid || in qed_iov_vf_stop_rxqs()
2515 p_queue->cids[qid_usage_idx].b_is_tx) { in qed_iov_vf_stop_rxqs()
2521 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", in qed_iov_vf_stop_rxqs()
2522 vf->relative_vf_id, in qed_iov_vf_stop_rxqs()
2523 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); in qed_iov_vf_stop_rxqs()
2524 return -EINVAL; in qed_iov_vf_stop_rxqs()
2527 /* Now that we know we have a valid Rx-queue - close it */ in qed_iov_vf_stop_rxqs()
2529 p_queue->cids[qid_usage_idx].p_cid, in qed_iov_vf_stop_rxqs()
2534 p_queue->cids[qid_usage_idx].p_cid = NULL; in qed_iov_vf_stop_rxqs()
2535 vf->num_active_rxqs--; in qed_iov_vf_stop_rxqs()
2548 return -EINVAL; in qed_iov_vf_stop_txqs()
2550 p_queue = &vf->vf_queues[txq_id]; in qed_iov_vf_stop_txqs()
2551 if (!p_queue->cids[qid_usage_idx].p_cid || in qed_iov_vf_stop_txqs()
2552 !p_queue->cids[qid_usage_idx].b_is_tx) in qed_iov_vf_stop_txqs()
2553 return -EINVAL; in qed_iov_vf_stop_txqs()
2555 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); in qed_iov_vf_stop_txqs()
2559 p_queue->cids[qid_usage_idx].p_cid = NULL; in qed_iov_vf_stop_txqs()
2568 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_stop_rxqs()
2578 req = &mbx->req_virt->stop_rxqs; in qed_iov_vf_mbx_stop_rxqs()
2579 if (req->num_rxqs != 1) { in qed_iov_vf_mbx_stop_rxqs()
2582 vf->relative_vf_id); in qed_iov_vf_mbx_stop_rxqs()
2587 /* Find which qid-index is associated with the queue */ in qed_iov_vf_mbx_stop_rxqs()
2592 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, in qed_iov_vf_mbx_stop_rxqs()
2593 qid_usage_idx, req->cqe_completion); in qed_iov_vf_mbx_stop_rxqs()
2606 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_stop_txqs()
2616 req = &mbx->req_virt->stop_txqs; in qed_iov_vf_mbx_stop_txqs()
2617 if (req->num_txqs != 1) { in qed_iov_vf_mbx_stop_txqs()
2620 vf->relative_vf_id); in qed_iov_vf_mbx_stop_txqs()
2625 /* Find which qid-index is associated with the queue */ in qed_iov_vf_mbx_stop_txqs()
2630 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); in qed_iov_vf_mbx_stop_txqs()
2645 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; in qed_iov_vf_mbx_update_rxqs()
2654 req = &mbx->req_virt->update_rxq; in qed_iov_vf_mbx_update_rxqs()
2655 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); in qed_iov_vf_mbx_update_rxqs()
2656 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); in qed_iov_vf_mbx_update_rxqs()
2662 /* There shouldn't exist a VF that uses queue-qids yet uses this in qed_iov_vf_mbx_update_rxqs()
2665 if ((vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_update_rxqs()
2666 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { in qed_iov_vf_mbx_update_rxqs()
2669 vf->relative_vf_id); in qed_iov_vf_mbx_update_rxqs()
2673 /* Validate inputs - for the legacy case this is still true since in qed_iov_vf_mbx_update_rxqs()
2676 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { in qed_iov_vf_mbx_update_rxqs()
2679 !vf->vf_queues[i].cids[qid_usage_idx].p_cid || in qed_iov_vf_mbx_update_rxqs()
2680 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { in qed_iov_vf_mbx_update_rxqs()
2683 vf->relative_vf_id, req->rx_qid, in qed_iov_vf_mbx_update_rxqs()
2684 req->num_rxqs); in qed_iov_vf_mbx_update_rxqs()
2690 for (i = 0; i < req->num_rxqs; i++) { in qed_iov_vf_mbx_update_rxqs()
2691 u16 qid = req->rx_qid + i; in qed_iov_vf_mbx_update_rxqs()
2693 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; in qed_iov_vf_mbx_update_rxqs()
2697 req->num_rxqs, in qed_iov_vf_mbx_update_rxqs()
2717 if (!p_tlv->length) { in qed_iov_search_list_tlvs()
2722 if (p_tlv->type == req_type) { in qed_iov_search_list_tlvs()
2725 p_tlv->type, p_tlv->length); in qed_iov_search_list_tlvs()
2729 len += p_tlv->length; in qed_iov_search_list_tlvs()
2730 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); in qed_iov_search_list_tlvs()
2732 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { in qed_iov_search_list_tlvs()
2736 } while (p_tlv->type != CHANNEL_TLV_LIST_END); in qed_iov_search_list_tlvs()
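qed_iov_search_list_tlvs() scans the VF's request buffer for a TLV of the wanted type, rejecting zero-length entries (a guard against malicious VFs that could otherwise pin the PF in an infinite loop) and bailing out before the running total exceeds TLV_BUFFER_SIZE. A standalone version of that bounded scan:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TLV_BUFFER_SIZE 128
    enum { TLV_LIST_END = 0, TLV_WANTED = 7 };   /* illustrative ids */

    struct channel_tlv { uint16_t type, length; };

    static void *search_tlvs(uint8_t *buf, uint16_t req_type)
    {
        struct channel_tlv *tlv = (struct channel_tlv *)buf;
        size_t len = 0;

        do {
            if (!tlv->length)          /* malformed: would loop forever */
                return NULL;
            if (tlv->type == req_type)
                return tlv;

            len += tlv->length;
            tlv = (struct channel_tlv *)((uint8_t *)tlv + tlv->length);
            if (len + tlv->length > TLV_BUFFER_SIZE)
                return NULL;           /* next entry would overrun */
        } while (tlv->type != TLV_LIST_END);

        return NULL;
    }

    int main(void)
    {
        uint8_t buf[TLV_BUFFER_SIZE] = { 0 };
        struct channel_tlv *t = (struct channel_tlv *)buf;

        t->type = TLV_WANTED;
        t->length = sizeof(*t);        /* zeroed LIST_END follows implicitly */
        printf("found: %s\n", search_tlvs(buf, TLV_WANTED) ? "yes" : "no");
        return 0;
    }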

/* in qed_iov_vp_update_act_param(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;

/* in qed_iov_vp_update_vlan_param(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;

/* in qed_iov_vp_update_tx_switch(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;

/* in qed_iov_vp_update_mcast_bin_param(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
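
/*
 * Approximate multicast works on hash "bins": each multicast MAC hashes
 * to one bin, and the memcpy above forwards the VF's requested bin
 * bitmap into the vport-update data. A minimal sketch of setting one
 * bin in such a u32-based bitmap ('bin' is a hypothetical bin index,
 * for illustration only):
 *
 *	p_data->bins[bin / 32] |= 1U << (bin % 32);
 */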

/* in qed_iov_vp_update_accept_flag(): */
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;

		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;

/* in qed_iov_vp_update_accept_any_vlan(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;

/* in qed_iov_vp_update_rss_param(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
		p_data->rss_params = NULL;

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

		q_idx = p_rss_tlv->rss_ind_table[i];
				   vf->relative_vf_id, q_idx);

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;

	p_data->rss_params = p_rss;
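
/*
 * Worked example for the clamp above, assuming the usual power-of-two
 * encoding: rss_table_size_log = 7 requests 1 << 7 = 128 indirection
 * entries, which min_t() caps at ARRAY_SIZE(p_rss->rss_ind_table).
 * Each entry then has its VF-relative queue index validated and
 * translated into the PF's Rx queue-cid pointer before being handed to
 * the ramrod.
 */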

/* in qed_iov_vp_update_sge_tpa_param(): */
		qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
		p_data->sge_tpa_params = NULL;

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

/* in qed_iov_pre_update_vport(): */
	struct qed_filter_accept_flags *flags = &params->accept_flags;

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	if (params->update_accept_any_vlan_flg) {
		vf_info->accept_any_vlan = params->accept_any_vlan;

		if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
			params->accept_any_vlan = false;
	}
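
/*
 * Net effect of the checks above: the PF always records what the VF
 * requested (rx/tx accept mode, accept_any_vlan), but an untrusted VF
 * has the promiscuous bits in 'mask' stripped from its filters and may
 * not accept-any-vlan while a forced VLAN is set. When the VF is later
 * marked trusted, the saved modes are replayed; see
 * qed_iov_handle_trust_change() further below.
 */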

/* in qed_iov_vf_mbx_vport_update(): */
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;

	if (!vf->vport_instance) {
			  vf->abs_vf_id);

	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,

			   "Upper-layer prevents VF vport configuration\n");

/* in qed_iov_vf_update_vlan_shadow(): */
	if (p_params->opcode == QED_FILTER_REMOVE) {
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;

				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
			p_vf->shadow_config.vlans[i].used = false;

	/* In forced mode, we're willing to remove entries - but we don't add
	 * ...
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
			if (p_vf->shadow_config.vlans[i].used)

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;

			   "VF [%d] - Tries to configure more than %d vlan filters\n",
			   p_vf->relative_vf_id,
		return -EINVAL;
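
/*
 * The shadow config appears here as the PF-side record of every VLAN
 * filter the VF has requested; keeping it lets the PF re-apply the
 * VF's filters later (e.g. once a forced VLAN is removed) without the
 * VF's involvement.
 */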

/* in qed_iov_vf_update_mac_shadow(): */
	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))

	if (p_vf->p_vf_info.is_trusted_configured)

	if (p_params->opcode == QED_FILTER_REMOVE) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);

			return -EINVAL;
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
			eth_zero_addr(p_vf->shadow_config.macs[i]);

	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)

		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);

	return -EINVAL;

/* in qed_iov_vf_update_unicast_shadow(): */
	if (p_params->type == QED_FILTER_MAC) {

	if (p_params->type == QED_FILTER_VLAN)

/* in qed_iov_chk_ucast(): */
		return -EINVAL;

	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN) {
		ether_addr_copy(vf->mac, params->mac);

		if (vf->is_trusted_configured) {
			qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
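
/*
 * For a trusted VF, the MAC it just configured is also published via
 * the bulletin board (qed_iov_bulletin_set_mac() above), keeping the
 * PF-advertised MAC in sync with the VF's own choice.
 */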

/* in qed_iov_vf_mbx_ucast_filter(): */
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;

	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

		   vf->abs_vf_id, params.opcode, params.type,

	if (!vf->vport_instance) {
			  vf->abs_vf_id);

	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&

	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,

/* in qed_iov_vf_mbx_int_cleanup(): */
	for (i = 0; i < vf->num_sbs; i++)
				vf->igu_sbs[i],
				vf->opaque_fid, false);

/* in qed_iov_vf_mbx_release(): */
	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		p_vf->state = VF_STOPPED;

/* in qed_iov_vf_pf_get_coalesce(): */
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;
	req = &mbx->req_virt->read_coal_req;

	qid = req->qid;
	b_is_rx = req->is_rx ? true : false;

			   p_vf->abs_vf_id, qid);

		p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);

			   p_vf->abs_vf_id, qid);

		p_queue = &p_vf->vf_queues[qid];
			if ((!p_queue->cids[i].p_cid) ||
			    (!p_queue->cids[i].b_is_tx))

			p_cid = p_queue->cids[i].p_cid;

	p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
	p_resp->coal = coal;

	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,

/* in qed_iov_vf_pf_set_coalesce(): */
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;

	req = &mbx->req_virt->update_coalesce;

	rx_coal = req->rx_coal;
	tx_coal = req->tx_coal;
	qid = req->qid;

			   vf->abs_vf_id, qid);

			   vf->abs_vf_id, qid);

		   vf->abs_vf_id, rx_coal, tx_coal, qid);

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);

		vf->rx_coal = rx_coal;

		struct qed_vf_queue *p_queue = &vf->vf_queues[qid];

			if (!p_queue->cids[i].p_cid)

			if (!p_queue->cids[i].b_is_tx)

						  p_queue->cids[i].p_cid);

					   vf->abs_vf_id);

		vf->tx_coal = tx_coal;

/* in qed_iov_vf_flr_poll_dorq(): */
	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);

	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

			  "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
			  p_vf->abs_vf_id, val);
		return -EBUSY;

/* in qed_iov_vf_flr_poll_pbf(): */
	u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
	u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;

		distance[voq] = prod - cons[voq];

			if (distance[voq] > tmp - cons[voq])

			  p_vf->abs_vf_id, (int)voq);
		return -EBUSY;
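
/*
 * The PBF poll is a producer/consumer check: distance[voq] = prod -
 * cons[voq] records how much traffic each VOQ still had in flight at
 * FLR time, and the loop then re-reads the consumer until it has
 * advanced by that distance (the 'tmp - cons[voq]' comparison above).
 * A VOQ that never drains within the polling budget fails the cleanup
 * with -EBUSY.
 */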

/* in qed_iov_execute_vf_flr_cleanup(): */
	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		u16 vfid = p_vf->abs_vf_id;

			   "VF[%d] - Handling FLR\n", vfid);

		if (!p_vf->b_init)

		/* Workaround to make VF-PF channel ready, as FW
		 * ...
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",

		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;

		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		p_vf->vf_mbx.b_pending_msg = false;

/* in qed_iov_vf_flr_cleanup(): */
	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * ...
	 * there's no need to wait per-vf, do it before looping.
	 */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)

/* in qed_iov_mark_vf_flr(): */
	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		vfid = p_vf->abs_vf_id;
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

				   "VF[%d] [rel %d] got FLR-ed\n",

			p_vf->state = VF_RESET;

/* in qed_iov_get_link(): */
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

/* in qed_iov_vf_pf_bulletin_update_mac(): */
	struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;

	if (!p_vf->p_vf_info.is_trusted_configured) {
			   p_vf->abs_vf_id);
		rc = -EINVAL;

	p_req = &mbx->req_virt->bulletin_update_mac;
	ether_addr_copy(p_bulletin->mac, p_req->mac);

		   p_vf->abs_vf_id, p_req->mac);

/* in qed_iov_process_mbx_req(): */
	mbx = &p_vf->vf_mbx;

	if (!mbx->b_pending_msg) {
			  p_vf->abs_vf_id);

	mbx->b_pending_msg = false;

	mbx->first_tlv = mbx->req_virt->first_tlv;

		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

					     mbx->first_tlv.tl.type,

		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * ...
		 */
			p_vf->abs_vf_id,
			mbx->first_tlv.tl.type,
			mbx->first_tlv.tl.length,
			mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
					     mbx->first_tlv.tl.type,

				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);

/* in qed_iov_pf_get_pending_events(): */
		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)

/* in qed_sriov_get_vf_from_absid(): */
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;

	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
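
/*
 * Absolute-to-relative VF id translation: this PF's VFs occupy a
 * contiguous range of absolute ids starting at first_vf_in_pf, so the
 * index into vfs_array[] is simply 'abs_vfid - min'. For example, with
 * first_vf_in_pf == 8, absolute VF id 10 maps to relative VF 2.
 */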

/* in qed_sriov_vfpf_msg(): */
	p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);

	p_vf->vf_mbx.b_pending_msg = true;

/* in qed_sriov_vfpf_malicious(): */
				       (p_data->entity_id));

	if (!p_vf->b_malicious) {
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;

			   "VF [%d] - Malicious behavior [%02x]\n",
			   p_vf->abs_vf_id, p_data->err_id);

/* in qed_sriov_eqe_event(): */
					  &data->vf_pf_channel.msg_addr);

		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
		return -EINVAL;

/* in qed_iov_get_next_active_vf(): */
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)

/* in qed_iov_copy_vf_msg(): */
		return -EINVAL;

	params.src_vfid = vf_info->abs_vf_id;

			   vf_info->vf_mbx.pending_req,
			   vf_info->vf_mbx.req_phys,
		return -EIO;

/* in qed_iov_bulletin_set_forced_mac(): */
		DP_NOTICE(p_hwfn->cdev,

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,

	if (vf_info->p_vf_info.is_trusted_configured) {
		vf_info->bulletin.p_virt->valid_bitmap &=
		vf_info->bulletin.p_virt->valid_bitmap &=

	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

/* in qed_iov_bulletin_set_mac(): */
		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
		return -EINVAL;

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
		return -EINVAL;

	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
		return -EINVAL;

	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (vf_info->p_vf_info.is_trusted_configured)

/* in qed_iov_bulletin_set_forced_vlan(): */
		DP_NOTICE(p_hwfn->cdev,

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,

	vf_info->bulletin.p_virt->pvid = pvid;
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

/* in qed_iov_bulletin_set_udp_ports(): */
		DP_NOTICE(p_hwfn->cdev,

	if (vf_info->b_malicious) {

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;

/* in qed_iov_vf_has_vport_instance(): */
	return !!p_vf_info->vport_instance;

/* in qed_iov_is_vf_stopped(): */
	return p_vf_info->state == VF_STOPPED;

/* in qed_iov_spoofchk_get(): */
	return vf_info->spoof_chk;

/* in qed_iov_spoofchk_set(): */
	int rc = -EINVAL;

			   "SR-IOV sanity check failed, can't set spoofchk\n");

	vf->req_spoofchk_val = val;

/* in qed_iov_bulletin_get_mac(): */
	if (!p_vf || !p_vf->bulletin.p_virt)

	if (!(p_vf->bulletin.p_virt->valid_bitmap &

	return p_vf->bulletin.p_virt->mac;

/* in qed_iov_bulletin_get_forced_mac(): */
	if (!p_vf || !p_vf->bulletin.p_virt)

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))

	return p_vf->bulletin.p_virt->mac;

/* in qed_iov_bulletin_get_forced_vlan(): */
	if (!p_vf || !p_vf->bulletin.p_virt)

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))

	return p_vf->bulletin.p_virt->pvid;

/* in qed_iov_configure_tx_rate(): */
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);

/* in qed_iov_configure_min_tx_rate(): */
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

				   "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;

		return -EINVAL;

	vport_id = vf->vport_id;

/* in qed_iov_get_vf_min_rate(): */
	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 */
	set_bit(flag, &hwfn->iov_task_flags);

	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
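
/*
 * The pattern above - set a flag bit, then kick the delayed work with
 * zero delay - is how all IOV events are funneled into the per-hwfn
 * worker. A minimal usage sketch, using the flag values handled by
 * qed_iov_pf_task() further below:
 *
 *	qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);	// VF mailbox work
 *	qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);	// VF FLR cleanup
 *	qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);	// trust changed
 */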

/* in qed_vf_start_iov_wq(): */
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);

/* in qed_sriov_disable(): */
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	if (cdev->recov_in_prog) {

		struct qed_hwfn *hwfn = &cdev->hwfns[i];

			return -EBUSY;

		qed_iov_release_hw_for_vf(&cdev->hwfns[i],

/* in qed_sriov_enable_qid_config(): */
	/* Since we have an equal resource distribution per-VF, and we assume
	 * ...
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
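
/*
 * Worked example for the layout above: if the PF keeps
 * FEAT_NUM(hwfn, QED_PF_L2_QUE) == 16 queues for itself (a hypothetical
 * value) and each VF gets num_queues == 4, then VF 0 requests queues
 * 16..19, VF 1 requests 20..23, and so on - equal, non-overlapping
 * slices per VF.
 */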

/* in qed_sriov_enable(): */
	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
			RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;

		hwfn = &cdev->hwfns[j];

			rc = -EBUSY;

	rc = pci_enable_sriov(cdev->pdev, num);

			rc = -EBUSY;

/* in qed_sriov_configure(): */
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;

/* in qed_sriov_pf_set_mac(): */
	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		return -EINVAL;

		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		if (vf_info->is_trusted_configured)
			ether_addr_copy(vf_info->mac, mac);
			ether_addr_copy(vf_info->forced_mac, mac);

/* in qed_sriov_pf_set_vlan(): */
	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		return -EINVAL;

		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		vf_info->forced_vlan = vid;

/* in qed_get_vf_config(): */
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		return -EINVAL;

	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
	ivi->trusted = vf_info->is_trusted_request;

/* in qed_inform_vf_link_state(): */
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);

	if (!hwfn->pf_iov_info)

	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		/* Only hwfn0 is actually interested in the link speed.
		 * ...
		 * need to take configuration from it - otherwise things like
		 * ...
		 */

		switch (vf_info->link_state) {
			/* Set speed according to maximum supported by HW.
			 * ...
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?

		if (link.link_up && vf_info->tx_rate) {
			rate = min_t(int, vf_info->tx_rate, link.speed);

			vf_info->tx_rate = rate;
			link.speed = rate;

/* in qed_set_vf_link_state(): */
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		return -EINVAL;

		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		if (vf->link_state == link_state)

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);

/* in qed_spoof_configure(): */
	int i, rc = -EINVAL;

		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

/* in qed_configure_max_vf_rate(): */
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

				   "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;

		vf->tx_rate = rate;

/* in qed_set_vf_rate(): */
		return -EINVAL;

/* in qed_set_vf_trust(): */
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

				   "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;

		if (vf->is_trusted_request == trust)

		vf->is_trusted_request = trust;

/* in qed_handle_vf_msg(): */
			   "Can't acquire PTT; re-scheduling\n");

			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

/* in qed_pf_validate_req_vf_mac(): */
	if (info->is_trusted_configured) {
		if (is_valid_ether_addr(info->mac) &&
		    (!mac || !ether_addr_equal(mac, info->mac)))

		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac)))

/* in qed_set_bulletin_mac(): */
	if (info->is_trusted_configured)
		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);

/* in qed_handle_pf_set_vf_unicast(): */
		if (info->is_trusted_configured)

				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		    info->forced_vlan) {
				   info->forced_vlan,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

					    info->forced_vlan, i);

/* in qed_update_mac_for_vf_trust_change(): */
	if (vf_info->is_trusted_configured &&
	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     vf_info->mac)) {
				eth_zero_addr(vf->shadow_config.macs[i]);

					   vf_info->mac, vf_id);

		ether_addr_copy(vf_info->mac, force_mac);
		eth_zero_addr(vf_info->forced_mac);
		vf->bulletin.p_virt->valid_bitmap &=

	if (!vf_info->is_trusted_configured) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
				ether_addr_copy(vf->shadow_config.macs[i],
						vf_info->mac);

					   vf_info->mac, vf_id);

/* in qed_iov_handle_trust_change(): */
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)

		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		if (!vf || !vf->vport_instance)

		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		params.mac_chk_en = !vf_info->is_trusted_configured;

		if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
			params.accept_any_vlan = vf_info->accept_any_vlan;

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||

			   vf_info->is_trusted_configured ? "trusted" : "untrusted",
			   vf->abs_vf_id, vf->relative_vf_id);

/* in qed_iov_pf_task(): */
	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))

			       &hwfn->iov_task_flags))

			       &hwfn->iov_task_flags))

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))

/* in qed_iov_wq_stop(): */
		if (!cdev->hwfns[i].iov_wq)

			qed_schedule_iov(&cdev->hwfns[i],
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);

		destroy_workqueue(cdev->hwfns[i].iov_wq);

/* in qed_iov_wq_start(): */
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			return -ENOMEM;

			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);