Lines Matching full:eq
43 #include <linux/mlx5/eq.h>
87 struct mlx5_ib_pf_eq *eq; member
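The remaining hits touch seven fields of struct mlx5_ib_pf_eq. A minimal reconstruction from those usages alone (field order and exact types are guesses; the authoritative definition lives in mlx5_ib.h):

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;	/* passed to mlx5_eq_enable() below */
	struct mlx5_ib_dev *dev;	/* owning IB device */
	struct mlx5_eq *core;		/* generic EQ from mlx5_eq_create_generic() */
	struct work_struct work;	/* deferred drain/refill work item */
	spinlock_t lock;		/* serializes mlx5_ib_eq_pf_process() */
	struct workqueue_struct *wq;	/* runs the per-pagefault handlers */
	mempool_t *pool;		/* preallocated struct mlx5_pagefault pool */
};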
1475 * the eq, switch to the dummy pagefault for the rest of the in mlx5_ib_mr_rdma_pfault_handler()
1586 struct mlx5_ib_pf_eq *eq = pfault->eq; in mlx5_ib_eqe_pf_action() local
1588 mlx5_ib_pfault(eq->dev, pfault); in mlx5_ib_eqe_pf_action()
1589 mempool_free(pfault, eq->pool); in mlx5_ib_eqe_pf_action()
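Hits 1586-1589 are the per-pagefault work handler: resolve the fault, then recycle the descriptor. A sketch of how they fit together, assuming struct mlx5_pagefault embeds the work item and an eq back-pointer (both implied by hits 1707-1709):

static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_ib_pf_eq *eq = pfault->eq;

	/* Resolve the fault, then return the descriptor to the mempool. */
	mlx5_ib_pfault(eq->dev, pfault);
	mempool_free(pfault, eq->pool);
}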
1593 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) in mlx5_ib_eq_pf_process() argument
1600 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { in mlx5_ib_eq_pf_process()
1601 pfault = mempool_alloc(eq->pool, GFP_ATOMIC); in mlx5_ib_eq_pf_process()
1603 schedule_work(&eq->work); in mlx5_ib_eq_pf_process()
1629 eq->dev, in mlx5_ib_eq_pf_process()
1634 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
1656 eq->dev, in mlx5_ib_eq_pf_process()
1685 eq->dev, in mlx5_ib_eq_pf_process()
1692 eq->dev, in mlx5_ib_eq_pf_process()
1699 mlx5_ib_warn(eq->dev, in mlx5_ib_eq_pf_process()
1707 pfault->eq = eq; in mlx5_ib_eq_pf_process()
1709 queue_work(eq->wq, &pfault->work); in mlx5_ib_eq_pf_process()
1711 cc = mlx5_eq_update_cc(eq->core, ++cc); in mlx5_ib_eq_pf_process()
1714 mlx5_eq_update_ci(eq->core, cc, 1); in mlx5_ib_eq_pf_process()
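Hits 1593-1714 outline the EQ drain loop. A condensed sketch: the decode of each EQE into the pagefault descriptor (the dbg/warn hits at 1629-1699) is elided, and the INIT_WORK() call is inferred from the queue_work() at 1709:

static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int cc = 0;

	/* Consume every pending EQE; cc counts entries handled so far. */
	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
		if (!pfault) {
			/* Pool drained: defer to the work item, which refills it. */
			schedule_work(&eq->work);
			break;
		}

		/* ... decode eqe into pfault (type, token, address) ... */

		pfault->eq = eq;
		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
		queue_work(eq->wq, &pfault->work);

		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}

	/* Publish the consumer index and rearm the EQ (the final 1). */
	mlx5_eq_update_ci(eq->core, cc, 1);
}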
1720 struct mlx5_ib_pf_eq *eq = in mlx5_ib_eq_pf_int() local
1724 if (spin_trylock_irqsave(&eq->lock, flags)) { in mlx5_ib_eq_pf_int()
1725 mlx5_ib_eq_pf_process(eq); in mlx5_ib_eq_pf_int()
1726 spin_unlock_irqrestore(&eq->lock, flags); in mlx5_ib_eq_pf_int()
1728 schedule_work(&eq->work); in mlx5_ib_eq_pf_int()
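Hits 1720-1728 are the EQ interrupt notifier: process inline if the lock is free, otherwise punt to the work item so the IRQ path never spins. A sketch assuming the standard notifier signature registered through eq->irq_nb (the return value is an assumption):

static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
			     void *data)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->lock, flags)) {
		mlx5_ib_eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
	} else {
		/* Someone else is draining; let the workqueue retry later. */
		schedule_work(&eq->work);
	}

	return IRQ_HANDLED;
}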
1746 struct mlx5_ib_pf_eq *eq = in mlx5_ib_eq_pf_action() local
1749 mempool_refill(eq->pool); in mlx5_ib_eq_pf_action()
1751 spin_lock_irq(&eq->lock); in mlx5_ib_eq_pf_action()
1752 mlx5_ib_eq_pf_process(eq); in mlx5_ib_eq_pf_action()
1753 spin_unlock_irq(&eq->lock); in mlx5_ib_eq_pf_action()
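Hits 1746-1753 are the deferred path taken when the interrupt handler loses the trylock race or the mempool runs dry. Note that mempool_refill() at 1749 is a local helper in this file, not part of the generic mempool API. A sketch:

static void mlx5_ib_eq_pf_action(struct work_struct *work)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(work, struct mlx5_ib_pf_eq, work);

	/* Top up the descriptor pool in sleepable context... */
	mempool_refill(eq->pool);

	/* ...then drain the EQ under the same lock the IRQ path trylocks. */
	spin_lock_irq(&eq->lock);
	mlx5_ib_eq_pf_process(eq);
	spin_unlock_irq(&eq->lock);
}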
1761 int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) in mlx5r_odp_create_eq() argument
1767 if (eq->core) in mlx5r_odp_create_eq()
1769 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action); in mlx5r_odp_create_eq()
1770 spin_lock_init(&eq->lock); in mlx5r_odp_create_eq()
1771 eq->dev = dev; in mlx5r_odp_create_eq()
1773 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN, in mlx5r_odp_create_eq()
1775 if (!eq->pool) { in mlx5r_odp_create_eq()
1780 eq->wq = alloc_workqueue("mlx5_ib_page_fault", in mlx5r_odp_create_eq()
1783 if (!eq->wq) { in mlx5r_odp_create_eq()
1788 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; in mlx5r_odp_create_eq()
1793 eq->core = mlx5_eq_create_generic(dev->mdev, &param); in mlx5r_odp_create_eq()
1794 if (IS_ERR(eq->core)) { in mlx5r_odp_create_eq()
1795 err = PTR_ERR(eq->core); in mlx5r_odp_create_eq()
1798 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb); in mlx5r_odp_create_eq()
1800 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err); in mlx5r_odp_create_eq()
1807 mlx5_eq_destroy_generic(dev->mdev, eq->core); in mlx5r_odp_create_eq()
1809 eq->core = NULL; in mlx5r_odp_create_eq()
1810 destroy_workqueue(eq->wq); in mlx5r_odp_create_eq()
1812 mempool_destroy(eq->pool); in mlx5r_odp_create_eq()
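Hits 1761-1812 give the construction order: work item and lock, then mempool, workqueue, and finally the generic EQ, with a goto-based unwind releasing resources in reverse. A condensed sketch; the EQ parameters, workqueue flags, and mempool element size are assumptions beyond what the hits show:

int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eq_param param = {};
	int err = 0;

	if (eq->core)			/* already created */
		return 0;

	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
	spin_lock_init(&eq->lock);
	eq->dev = dev;

	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));
	if (!eq->pool)
		return -ENOMEM;

	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);	/* flags and max_active assumed */
	if (!eq->wq) {
		err = -ENOMEM;
		goto err_mempool;
	}

	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; /* assumed mask */
	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
		goto err_wq;
	}

	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
	if (err) {
		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
		goto err_eq;
	}

	return 0;

err_eq:
	mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
	eq->core = NULL;
	destroy_workqueue(eq->wq);
err_mempool:
	mempool_destroy(eq->pool);
	return err;
}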
1819 mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) in mlx5_ib_odp_destroy_eq() argument
1823 if (!eq->core) in mlx5_ib_odp_destroy_eq()
1825 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb); in mlx5_ib_odp_destroy_eq()
1826 err = mlx5_eq_destroy_generic(dev->mdev, eq->core); in mlx5_ib_odp_destroy_eq()
1827 cancel_work_sync(&eq->work); in mlx5_ib_odp_destroy_eq()
1828 destroy_workqueue(eq->wq); in mlx5_ib_odp_destroy_eq()
1829 mempool_destroy(eq->pool); in mlx5_ib_odp_destroy_eq()
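Hits 1819-1829 tear things down in the reverse order: disable event delivery, destroy the hardware EQ, flush the pending work item, then free the workqueue and mempool. A sketch (the return type and early-out value are assumptions):

static int mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_pf_eq *eq)
{
	int err;

	if (!eq->core)		/* never created; nothing to undo */
		return 0;

	/* Stop delivery first so nothing new can be queued... */
	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);

	/* ...then drain in-flight work before freeing its resources. */
	cancel_work_sync(&eq->work);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);

	return err;
}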