// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
 * the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
 * used to set the EQ size, budget must be smaller than the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
	struct xarray		comp_eqs;
	struct mlx5_eq_async	pages_eq;
	struct mlx5_eq_async	cmd_eq;
	struct mlx5_eq_async	async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	struct mutex		comp_lock; /* sync comp eqs creations */
	int			curr_comp_eqs;
	int			max_comp_eqs;
	struct mlx5_irq_table	*irq_table;
	struct xarray		comp_irqs;
	struct mlx5_irq		*ctrl_irq;
	struct cpu_rmap		*rmap;
	struct cpumask		used_cpus;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	__be64 *pas;
	u16 vecidx;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	eq->irq = param->irq;
	vecidx = mlx5_irq_get_index(eq->irq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to enable
 * @nb : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to disable
 * @nb : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
				 dev->priv.numa_node);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	cpumask_clear(&eq_table->used_cpus);
	xa_init(&eq_table->comp_eqs);
	xa_init(&eq_table->comp_irqs);
	mutex_init(&eq_table->comp_lock);
	eq_table->curr_comp_eqs = 0;
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_eq_debugfs_cleanup(dev);
	xa_destroy(&table->comp_irqs);
	xa_destroy(&table->comp_eqs);
	kvfree(table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_NUM_ASYNC_EQE;
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	/* All the async_eqs use a single IRQ; request one IRQ and share its
	 * index among all the async_eqs of this device.
	 */
	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(table->ctrl_irq))
		return PTR_ERR(table->ctrl_irq);

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = async_eq_depth_devlink_param_get(dev),
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	/* Skip page EQ creation when the device does not issue page requests */
	if (MLX5_CAP_GEN(dev, page_request_disable)) {
		mlx5_core_dbg(dev, "Skip page EQ creation\n");
		return 0;
	}

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	if (!MLX5_CAP_GEN(dev, page_request_disable))
		cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 * (an illustrative usage sketch appears in a comment at the end of this file)
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
					   dev->priv.numa_node);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	param->irq = dev->priv.eq_table->ctrl_irq;
	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);

static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_irq *irq;

	irq = xa_load(&table->comp_irqs, vecidx);
	if (!irq)
		return;

	xa_erase(&table->comp_irqs, vecidx);
	mlx5_irq_release_vector(irq);
}

static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
	return cpumask_local_spread(index, dev->priv.numa_node);
}

static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->priv.eq_table->rmap;
#endif
	return dev->priv.eq_table->rmap;
#else
	return NULL;
#endif
}

static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct cpu_rmap *rmap;
	struct mlx5_irq *irq;
	int cpu;

	rmap = mlx5_eq_table_get_pci_rmap(dev);
	cpu = mlx5_cpumask_default_spread(dev, vecidx);
	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}

static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_irq *irq;
	int cpu;

	irq = xa_load(&table->comp_irqs, vecidx);
	if (!irq)
		return;

	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
	cpumask_clear_cpu(cpu, &table->used_cpus);
	xa_erase(&table->comp_irqs, vecidx);
	mlx5_irq_affinity_irq_release(dev, irq);
}

static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	struct irq_affinity_desc af_desc = {};
	struct mlx5_irq *irq;

	/* In case the SF IRQ pool does not exist, fall back to the PF IRQs */
	if (!mlx5_irq_pool_is_sf_pool(pool))
		return comp_irq_request_pci(dev, vecidx);

	af_desc.is_managed = false;
	cpumask_copy(&af_desc.mask, cpu_online_mask);
	cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
	irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

	cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);

	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}

static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
{
	mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
			       comp_irq_release_pci(dev, vecidx);
}

static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
{
	return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
				      comp_irq_request_pci(dev, vecidx);
}

#ifdef CONFIG_RFS_ACCEL
static int alloc_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	/* rmap is a mapping between irq number and queue number.
	 * Each irq can be assigned only to a single rmap.
	 * Since SFs share IRQs, rmap mapping cannot function correctly
	 * for irqs that are shared between different core/netdev RX rings.
	 * Hence we don't allow netdev rmap for SFs.
	 */
	if (mlx5_core_is_sf(mdev))
		return 0;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
	if (!eq_table->rmap)
		return -ENOMEM;
	return 0;
}

static void free_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	if (eq_table->rmap) {
		free_irq_cpu_rmap(eq_table->rmap);
		eq_table->rmap = NULL;
	}
}
#else
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif

static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	xa_erase(&table->comp_eqs, vecidx);
	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	if (destroy_unmap_eq(dev, &eq->core))
		mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
			       eq->core.eqn);
	tasklet_disable(&eq->tasklet_ctx.task);
	kfree(eq);
	comp_irq_release(dev, vecidx);
	table->curr_comp_eqs--;
}

static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_COMP_EQ_SIZE;
}

/* Must be called with EQ table comp_lock held */
static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	struct mlx5_eq_comp *eq;
	struct mlx5_irq *irq;
	int nent;
	int err;

	lockdep_assert_held(&table->comp_lock);
	if (table->curr_comp_eqs == table->max_comp_eqs) {
		mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
			      table->max_comp_eqs);
		return -ENOMEM;
	}

	err = comp_irq_request(dev, vecidx);
	if (err)
		return err;

	nent = comp_eq_depth_devlink_param_get(dev);

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
	if (!eq) {
		err = -ENOMEM;
		goto clean_irq;
	}

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

	irq = xa_load(&table->comp_irqs, vecidx);
	eq->irq_nb.notifier_call = mlx5_eq_comp_int;
	param = (struct mlx5_eq_param) {
		.irq = irq,
		.nent = nent,
	};

	err = create_map_eq(dev, &eq->core, &param);
	if (err)
		goto clean_eq;
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		destroy_unmap_eq(dev, &eq->core);
		goto clean_eq;
	}

	mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
	err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
	if (err)
		goto disable_eq;

	table->curr_comp_eqs++;
	return eq->core.eqn;

disable_eq:
	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
	kfree(eq);
clean_irq:
	comp_irq_release(dev, vecidx);
	return err;
}

int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ret = 0;

	if (vecidx >= table->max_comp_eqs) {
		mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
			      vecidx, table->max_comp_eqs);
		return -EINVAL;
	}

	mutex_lock(&table->comp_lock);
	eq = xa_load(&table->comp_eqs, vecidx);
	if (eq) {
		*eqn = eq->core.eqn;
		goto out;
	}

	ret = create_comp_eq(dev, vecidx);
	if (ret < 0) {
		mutex_unlock(&table->comp_lock);
		return ret;
	}

	*eqn = ret;
out:
	mutex_unlock(&table->comp_lock);
	return 0;
}
EXPORT_SYMBOL(mlx5_comp_eqn_get);

int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int eqn;
	int err;

	/* Allocate the EQ if not allocated yet */
	err = mlx5_comp_eqn_get(dev, vector, &eqn);
	if (err)
		return err;

	eq = xa_load(&table->comp_eqs, vector);
	*irqn = eq->core.irqn;
	return 0;
}

unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->max_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_max);

static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	eq = xa_load(&table->comp_eqs, vector);
	if (eq)
		return mlx5_irq_get_affinity_mask(eq->core.irq);

	return NULL;
}

int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
{
	struct cpumask *mask;
	int cpu;

	mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
	if (mask)
		cpu = cpumask_first(mask);
	else
		cpu = mlx5_cpumask_default_spread(dev, vector);

	return cpu;
}
EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	unsigned long index;

	xa_for_each(&table->comp_eqs, index, eq)
		if (eq->core.eqn == eqn)
			return eq;

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	mlx5_irq_table_free_irqs(dev);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif

static int get_num_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int max_dev_eqs;
	int num_eqs;

	/* If ethernet is disabled we use just a single completion vector to
	 * have the other vectors available for other drivers using mlx5_core.
	 * For example, mlx5_vdpa
	 */
	if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
		return 1;

	max_dev_eqs = mlx5_max_eq_cap_get(dev);

	num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
			max_dev_eqs - MLX5_MAX_ASYNC_EQS);
	if (mlx5_core_is_sf(dev)) {
		int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
				 MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
				 MLX5_COMP_EQS_PER_SF;

		max_eqs_sf = min_t(int, max_eqs_sf,
				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
		num_eqs = min_t(int, num_eqs, max_eqs_sf);
	}

	return num_eqs;
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	eq_table->max_comp_eqs = get_num_eqs(dev);
	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = alloc_rmap(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to allocate rmap\n");
		goto err_rmap;
	}

	return 0;

err_rmap:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	unsigned long index;

	xa_for_each(&table->comp_eqs, index, eq)
		destroy_comp_eq(dev, eq, index);

	free_rmap(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
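
/*
 * Illustrative usage sketch (kept as a comment, not compiled, and not taken
 * from any in-tree consumer): how an mlx5_core consumer might drive the
 * generic EQ API exported above. The context struct, the EQ depth, the event
 * mask and the handler body are assumptions made only for this example; the
 * API calls (mlx5_eq_create_generic(), mlx5_eq_enable(), mlx5_eq_get_eqe(),
 * mlx5_eq_update_ci(), mlx5_eq_disable(), mlx5_eq_destroy_generic()) are the
 * ones defined in this file.
 *
 *	struct my_eq_ctx {			// hypothetical consumer context
 *		struct mlx5_eq *eq;
 *		struct notifier_block irq_nb;
 *	};
 *
 *	static int my_eq_handler(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		struct my_eq_ctx *ctx = container_of(nb, struct my_eq_ctx, irq_nb);
 *		struct mlx5_eqe *eqe;
 *		u32 cc = 0;
 *
 *		// Drain the EQEs owned by software, then publish the new
 *		// consumer index and re-arm the EQ.
 *		while ((eqe = mlx5_eq_get_eqe(ctx->eq, cc))) {
 *			// consume eqe->type / eqe->data here
 *			cc++;
 *		}
 *		mlx5_eq_update_ci(ctx->eq, cc, true);
 *		return NOTIFY_OK;
 *	}
 *
 *	static int my_eq_init(struct mlx5_core_dev *dev, struct my_eq_ctx *ctx)
 *	{
 *		struct mlx5_eq_param param = {
 *			.nent = 64,					// assumed depth
 *			.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,	// assumed event
 *		};
 *		int err;
 *
 *		ctx->eq = mlx5_eq_create_generic(dev, &param);
 *		if (IS_ERR(ctx->eq))
 *			return PTR_ERR(ctx->eq);
 *
 *		ctx->irq_nb.notifier_call = my_eq_handler;
 *		err = mlx5_eq_enable(dev, ctx->eq, &ctx->irq_nb);
 *		if (err)
 *			mlx5_eq_destroy_generic(dev, ctx->eq);
 *		return err;
 *	}
 *
 *	static void my_eq_cleanup(struct mlx5_core_dev *dev, struct my_eq_ctx *ctx)
 *	{
 *		mlx5_eq_disable(dev, ctx->eq, &ctx->irq_nb);
 *		mlx5_eq_destroy_generic(dev, ctx->eq);
 *	}
 */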