Lines Matching full:ndev

143 static int xeon_init_isr(struct intel_ntb_dev *ndev);
145 static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) in ndev_reset_unsafe_flags() argument
147 ndev->unsafe_flags = 0; in ndev_reset_unsafe_flags()
148 ndev->unsafe_flags_ignore = 0; in ndev_reset_unsafe_flags()
151 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) in ndev_reset_unsafe_flags()
152 if (!ntb_topo_is_b2b(ndev->ntb.topo)) in ndev_reset_unsafe_flags()
153 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
156 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) { in ndev_reset_unsafe_flags()
157 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
158 ndev->unsafe_flags |= NTB_UNSAFE_SPAD; in ndev_reset_unsafe_flags()
162 static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev, in ndev_is_unsafe() argument
165 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore); in ndev_is_unsafe()
168 static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev, in ndev_ignore_unsafe() argument
171 flag &= ndev->unsafe_flags; in ndev_ignore_unsafe()
172 ndev->unsafe_flags_ignore |= flag; in ndev_ignore_unsafe()
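
The three helpers above form a small policy layer: ndev_reset_unsafe_flags() derives unsafe_flags from the hardware-errata flags and the topology, ndev_is_unsafe() reports a flag as unsafe only while it is set and not explicitly ignored, and ndev_ignore_unsafe() lets a caller opt in to unsafe access by moving set flags into the ignore mask. A minimal user-space model of that masking logic (the NTB_UNSAFE_* bit values are illustrative, not the driver's):

    #include <stdio.h>

    #define NTB_UNSAFE_DB   (1u << 0)   /* illustrative bit values */
    #define NTB_UNSAFE_SPAD (1u << 1)

    struct dev_model { unsigned unsafe_flags, unsafe_flags_ignore; };

    /* unsafe only while set and not explicitly ignored */
    static int is_unsafe(const struct dev_model *d, unsigned flag)
    {
        return !!(flag & d->unsafe_flags & ~d->unsafe_flags_ignore);
    }

    /* only flags that are actually set can be ignored */
    static unsigned ignore_unsafe(struct dev_model *d, unsigned flag)
    {
        flag &= d->unsafe_flags;
        d->unsafe_flags_ignore |= flag;
        return flag;
    }

    int main(void)
    {
        struct dev_model d = { .unsafe_flags = NTB_UNSAFE_DB };

        printf("db unsafe: %d\n", is_unsafe(&d, NTB_UNSAFE_DB));    /* 1 */
        ignore_unsafe(&d, NTB_UNSAFE_DB);
        printf("db unsafe: %d\n", is_unsafe(&d, NTB_UNSAFE_DB));    /* 0 */
        return 0;
    }
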
177 int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) in ndev_mw_to_bar() argument
179 if (idx < 0 || idx >= ndev->mw_count) in ndev_mw_to_bar()
181 return ndev->reg->mw_bar[idx]; in ndev_mw_to_bar()
184 void ndev_db_addr(struct intel_ntb_dev *ndev, in ndev_db_addr() argument
188 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_addr()
193 dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr); in ndev_db_addr()
197 *db_size = ndev->reg->db_size; in ndev_db_addr()
198 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); in ndev_db_addr()
202 u64 ndev_db_read(struct intel_ntb_dev *ndev, in ndev_db_read() argument
205 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_read()
208 return ndev->reg->db_ioread(mmio); in ndev_db_read()
211 int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_write() argument
214 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_write()
217 if (db_bits & ~ndev->db_valid_mask) in ndev_db_write()
220 ndev->reg->db_iowrite(db_bits, mmio); in ndev_db_write()
225 static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_set_mask() argument
230 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_set_mask()
233 if (db_bits & ~ndev->db_valid_mask) in ndev_db_set_mask()
236 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
238 ndev->db_mask |= db_bits; in ndev_db_set_mask()
239 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_set_mask()
241 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
246 static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_clear_mask() argument
251 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_clear_mask()
254 if (db_bits & ~ndev->db_valid_mask) in ndev_db_clear_mask()
257 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
259 ndev->db_mask &= ~db_bits; in ndev_db_clear_mask()
260 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_clear_mask()
262 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
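
ndev_db_set_mask() and ndev_db_clear_mask() share one pattern: reject bits outside db_valid_mask, then update the cached ndev->db_mask and write the whole mask register back under db_mask_lock, so concurrent updates cannot interleave the read-modify-write. A kernel-style sketch of that pattern; db_mask_update() is a hypothetical helper merging the two directions:

    /* hypothetical merged form of the set/clear mask functions above */
    static int db_mask_update(struct intel_ntb_dev *ndev, u64 db_bits,
                              void __iomem *mmio, bool set)
    {
        unsigned long irqflags;

        if (db_bits & ~ndev->db_valid_mask)
            return -EINVAL;             /* bits outside the doorbell register */

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);

        if (set)
            ndev->db_mask |= db_bits;   /* mask (silence) these doorbells */
        else
            ndev->db_mask &= ~db_bits;  /* unmask them again */
        ndev->reg->db_iowrite(ndev->db_mask, mmio);

        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
    }
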
267 static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) in ndev_vec_mask() argument
271 shift = ndev->db_vec_shift; in ndev_vec_mask()
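
Only one line of ndev_vec_mask() matches, but its role is clear from the callers: each interrupt vector owns a db_vec_shift-bit slice of the doorbell register. A runnable demonstration of the presumed arithmetic, where vector v covers bits [v*shift, (v+1)*shift):

    #include <stdio.h>
    #include <stdint.h>

    /* bits [vec*shift, (vec+1)*shift) of the doorbell register belong to
     * interrupt vector vec */
    static uint64_t vec_mask(unsigned shift, int vec)
    {
        uint64_t mask = (UINT64_C(1) << shift) - 1;

        return mask << (shift * vec);
    }

    int main(void)
    {
        unsigned shift = 1;     /* one doorbell bit per vector */
        int vec;

        for (vec = 0; vec < 4; vec++)
            printf("vec %d -> mask %#llx\n", vec,
                   (unsigned long long)vec_mask(shift, vec));
        return 0;
    }
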
277 static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, in ndev_spad_addr() argument
281 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_addr()
284 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_addr()
289 dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n", in ndev_spad_addr()
296 static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx, in ndev_spad_read() argument
299 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_read()
302 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_read()
308 static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val, in ndev_spad_write() argument
311 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_write()
314 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_write()
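
The scratchpad helpers mirror the doorbell ones: warn once when scratchpads are flagged unsafe, bounds-check idx against spad_count, then access a 32-bit register at a fixed stride from the scratchpad base. A sketch of the address math in ndev_spad_addr(), assuming the usual 4-byte register stride:

    /* sketch of ndev_spad_addr()'s address computation; the 4-byte stride
     * is an assumption based on the 32-bit scratchpad registers */
    static int spad_addr_sketch(const struct intel_ntb_dev *ndev, int idx,
                                phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                unsigned long reg)
    {
        if (idx < 0 || idx >= ndev->spad_count)
            return -EINVAL;

        if (spad_addr)
            *spad_addr = reg_addr + reg + (idx << 2);   /* idx * 4 bytes */

        return 0;
    }
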
322 static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) in ndev_interrupt() argument
326 vec_mask = ndev_vec_mask(ndev, vec); in ndev_interrupt()
328 if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) in ndev_interrupt()
329 vec_mask |= ndev->db_link_mask; in ndev_interrupt()
331 dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask); in ndev_interrupt()
333 ndev->last_ts = jiffies; in ndev_interrupt()
335 if (vec_mask & ndev->db_link_mask) { in ndev_interrupt()
336 if (ndev->reg->poll_link(ndev)) in ndev_interrupt()
337 ntb_link_event(&ndev->ntb); in ndev_interrupt()
340 if (vec_mask & ndev->db_valid_mask) in ndev_interrupt()
341 ntb_db_event(&ndev->ntb, vec); in ndev_interrupt()
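
ndev_interrupt() demultiplexes one vector into the two event classes: doorbell bits covered by the vector's mask raise ntb_db_event(), while the dedicated link bit triggers a poll and, on a state change, ntb_link_event(); the NTB_HWERR_MSIX_VECTOR32_BAD quirk folds the link bit into vector 31 on parts whose 32nd MSI-X vector is unusable. The control flow, reconstructed in outline from the matched lines:

    /* ndev_interrupt(), reconstructed in outline from the lines above */
    static irqreturn_t interrupt_sketch(struct intel_ntb_dev *ndev, int vec)
    {
        u64 vec_mask = ndev_vec_mask(ndev, vec);

        /* quirk: parts with a broken 32nd MSI-X vector deliver the link
         * doorbell through vector 31 instead */
        if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
            vec_mask |= ndev->db_link_mask;

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
            if (ndev->reg->poll_link(ndev))     /* nonzero on state change */
                ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
            ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
    }
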
350 dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n", in ndev_vec_isr()
353 return ndev_interrupt(nvec->ndev, nvec->num); in ndev_vec_isr()
358 struct intel_ntb_dev *ndev = dev; in ndev_irq_isr() local
360 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); in ndev_irq_isr()
363 int ndev_init_isr(struct intel_ntb_dev *ndev, in ndev_init_isr() argument
370 pdev = ndev->ntb.pdev; in ndev_init_isr()
375 ndev->db_mask = ndev->db_valid_mask; in ndev_init_isr()
376 ndev->reg->db_iowrite(ndev->db_mask, in ndev_init_isr()
377 ndev->self_mmio + in ndev_init_isr()
378 ndev->self_reg->db_mask); in ndev_init_isr()
382 ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec), in ndev_init_isr()
384 if (!ndev->vec) in ndev_init_isr()
387 ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), in ndev_init_isr()
389 if (!ndev->msix) in ndev_init_isr()
393 ndev->msix[i].entry = i; in ndev_init_isr()
395 msix_count = pci_enable_msix_range(pdev, ndev->msix, in ndev_init_isr()
401 ndev->vec[i].ndev = ndev; in ndev_init_isr()
402 ndev->vec[i].num = i; in ndev_init_isr()
403 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, in ndev_init_isr()
404 "ndev_vec_isr", &ndev->vec[i]); in ndev_init_isr()
410 ndev->db_vec_count = msix_count; in ndev_init_isr()
411 ndev->db_vec_shift = msix_shift; in ndev_init_isr()
416 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_init_isr()
419 kfree(ndev->msix); in ndev_init_isr()
421 kfree(ndev->vec); in ndev_init_isr()
423 ndev->msix = NULL; in ndev_init_isr()
424 ndev->vec = NULL; in ndev_init_isr()
433 "ndev_irq_isr", ndev); in ndev_init_isr()
438 ndev->db_vec_count = 1; in ndev_init_isr()
439 ndev->db_vec_shift = total_shift; in ndev_init_isr()
451 "ndev_irq_isr", ndev); in ndev_init_isr()
456 ndev->db_vec_count = 1; in ndev_init_isr()
457 ndev->db_vec_shift = total_shift; in ndev_init_isr()
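
ndev_init_isr() walks the interrupt schemes in order of decreasing quality: per-vector MSI-X, then a single MSI, then legacy INTx, with each fallback collapsing to db_vec_count = 1 and the full shift so all doorbell bits share the lone vector. A condensed sketch of that ladder; try_msix(), try_msi() and try_intx() are hypothetical stand-ins for the pci_enable_msix_range()/request_irq() sequences in the real function:

    /* condensed sketch of ndev_init_isr(); the try_*() helpers are
     * hypothetical, not driver functions */
    static int init_isr_sketch(struct intel_ntb_dev *ndev, int msix_max,
                               int msix_shift, int total_shift)
    {
        int msix_count;

        /* mask all doorbells before any handler can fire */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio + ndev->self_reg->db_mask);

        msix_count = try_msix(ndev, msix_max);          /* hypothetical */
        if (msix_count > 0) {
            ndev->db_vec_count = msix_count;    /* one db slice per vector */
            ndev->db_vec_shift = msix_shift;
            return 0;
        }

        if (try_msi(ndev) == 0 || try_intx(ndev) == 0) {/* hypothetical */
            ndev->db_vec_count = 1;     /* every doorbell on one vector */
            ndev->db_vec_shift = total_shift;
            return 0;
        }

        return -EINVAL;
    }
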
464 static void ndev_deinit_isr(struct intel_ntb_dev *ndev) in ndev_deinit_isr() argument
469 pdev = ndev->ntb.pdev; in ndev_deinit_isr()
472 ndev->db_mask = ndev->db_valid_mask; in ndev_deinit_isr()
473 ndev->reg->db_iowrite(ndev->db_mask, in ndev_deinit_isr()
474 ndev->self_mmio + in ndev_deinit_isr()
475 ndev->self_reg->db_mask); in ndev_deinit_isr()
477 if (ndev->msix) { in ndev_deinit_isr()
478 i = ndev->db_vec_count; in ndev_deinit_isr()
480 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_deinit_isr()
482 kfree(ndev->msix); in ndev_deinit_isr()
483 kfree(ndev->vec); in ndev_deinit_isr()
485 free_irq(pdev->irq, ndev); in ndev_deinit_isr()
494 struct intel_ntb_dev *ndev; in ndev_ntb_debugfs_read() local
502 ndev = filp->private_data; in ndev_ntb_debugfs_read()
503 pdev = ndev->ntb.pdev; in ndev_ntb_debugfs_read()
504 mmio = ndev->self_mmio; in ndev_ntb_debugfs_read()
519 ntb_topo_string(ndev->ntb.topo)); in ndev_ntb_debugfs_read()
521 if (ndev->b2b_idx != UINT_MAX) { in ndev_ntb_debugfs_read()
523 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx); in ndev_ntb_debugfs_read()
525 "B2B Offset -\t\t%#lx\n", ndev->b2b_off); in ndev_ntb_debugfs_read()
530 ndev->bar4_split ? "yes" : "no"); in ndev_ntb_debugfs_read()
533 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); in ndev_ntb_debugfs_read()
535 "LNK STA -\t\t%#06x\n", ndev->lnk_sta); in ndev_ntb_debugfs_read()
537 if (!ndev->reg->link_is_up(ndev)) { in ndev_ntb_debugfs_read()
545 NTB_LNK_STA_SPEED(ndev->lnk_sta)); in ndev_ntb_debugfs_read()
548 NTB_LNK_STA_WIDTH(ndev->lnk_sta)); in ndev_ntb_debugfs_read()
552 "Memory Window Count -\t%u\n", ndev->mw_count); in ndev_ntb_debugfs_read()
554 "Scratchpad Count -\t%u\n", ndev->spad_count); in ndev_ntb_debugfs_read()
556 "Doorbell Count -\t%u\n", ndev->db_count); in ndev_ntb_debugfs_read()
558 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); in ndev_ntb_debugfs_read()
560 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); in ndev_ntb_debugfs_read()
563 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); in ndev_ntb_debugfs_read()
565 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); in ndev_ntb_debugfs_read()
567 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); in ndev_ntb_debugfs_read()
569 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); in ndev_ntb_debugfs_read()
573 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); in ndev_ntb_debugfs_read()
583 if (!ndev->bar4_split) { in ndev_ntb_debugfs_read()
599 if (!ndev->bar4_split) { in ndev_ntb_debugfs_read()
615 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2)); in ndev_ntb_debugfs_read()
619 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
620 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_ntb_debugfs_read()
624 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5)); in ndev_ntb_debugfs_read()
628 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_ntb_debugfs_read()
633 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2)); in ndev_ntb_debugfs_read()
637 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
638 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_ntb_debugfs_read()
641 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5)); in ndev_ntb_debugfs_read()
645 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_ntb_debugfs_read()
651 if (ntb_topo_is_b2b(ndev->ntb.topo)) { in ndev_ntb_debugfs_read()
659 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
679 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
706 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
760 struct intel_ntb_dev *ndev = filp->private_data; in ndev_debugfs_read() local
762 if (pdev_is_gen1(ndev->ntb.pdev)) in ndev_debugfs_read()
764 else if (pdev_is_gen3(ndev->ntb.pdev)) in ndev_debugfs_read()
766 else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev)) in ndev_debugfs_read()
772 static void ndev_init_debugfs(struct intel_ntb_dev *ndev) in ndev_init_debugfs() argument
775 ndev->debugfs_dir = NULL; in ndev_init_debugfs()
776 ndev->debugfs_info = NULL; in ndev_init_debugfs()
778 ndev->debugfs_dir = in ndev_init_debugfs()
779 debugfs_create_dir(pci_name(ndev->ntb.pdev), in ndev_init_debugfs()
781 if (IS_ERR(ndev->debugfs_dir)) in ndev_init_debugfs()
782 ndev->debugfs_info = NULL; in ndev_init_debugfs()
784 ndev->debugfs_info = in ndev_init_debugfs()
786 ndev->debugfs_dir, ndev, in ndev_init_debugfs()
791 static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) in ndev_deinit_debugfs() argument
793 debugfs_remove_recursive(ndev->debugfs_dir); in ndev_deinit_debugfs()
809 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_get_align() local
816 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_get_align()
819 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_get_align()
823 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_get_align()
825 if (idx == ndev->b2b_idx) in intel_ntb_mw_get_align()
826 mw_size = bar_size - ndev->b2b_off; in intel_ntb_mw_get_align()
831 *addr_align = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_get_align()
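
In the b2b topologies one memory window may be spent on reaching the peer's registers, which is why intel_ntb_mw_get_align() special-cases b2b_idx: the guard on b2b_off handles a fully consumed BAR (the body of that branch is not in this listing), and a half-consumed BAR reports bar_size - b2b_off as its usable size. The size rule in isolation, as a runnable function:

    #include <stdio.h>

    /* usable size of the window backed by a BAR of bar_size bytes: the b2b
     * window, when shared (b2b_off != 0), loses its lower b2b_off bytes */
    static unsigned long long usable_size(unsigned long long bar_size,
                                          int idx, int b2b_idx,
                                          unsigned long long b2b_off)
    {
        if (idx == b2b_idx)
            return bar_size - b2b_off;
        return bar_size;
    }

    int main(void)
    {
        /* 1 MiB BAR shared with b2b registers: upper 512 KiB stay usable */
        printf("%llu\n", usable_size(1ULL << 20, 1, 1, 1ULL << 19));
        printf("%llu\n", usable_size(1ULL << 20, 0, 1, 0)); /* full 1 MiB */
        return 0;
    }
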
845 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_set_trans() local
855 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_set_trans()
858 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_set_trans()
862 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_set_trans()
864 if (idx == ndev->b2b_idx) in intel_ntb_mw_set_trans()
865 mw_size = bar_size - ndev->b2b_off; in intel_ntb_mw_set_trans()
877 mmio = ndev->self_mmio; in intel_ntb_mw_set_trans()
878 base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar); in intel_ntb_mw_set_trans()
879 xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar); in intel_ntb_mw_set_trans()
880 limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar); in intel_ntb_mw_set_trans()
882 if (bar < 4 || !ndev->bar4_split) { in intel_ntb_mw_set_trans()
946 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_link_is_up() local
948 if (ndev->reg->link_is_up(ndev)) { in intel_ntb_link_is_up()
950 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); in intel_ntb_link_is_up()
952 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); in intel_ntb_link_is_up()
969 struct intel_ntb_dev *ndev; in intel_ntb_link_enable() local
972 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_enable()
974 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_enable()
985 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
989 if (ndev->bar4_split) in intel_ntb_link_enable()
991 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
998 struct intel_ntb_dev *ndev; in intel_ntb_link_disable() local
1001 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_disable()
1003 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_disable()
1009 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
1012 if (ndev->bar4_split) in intel_ntb_link_disable()
1015 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
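
intel_ntb_link_enable() and intel_ntb_link_disable() are read-modify-write updates of the NTB control register: clear or set the disable and config-lock bits and adjust the per-BAR snoop bits, with bar4_split adding the BAR5 window. A sketch of the enable side; the NTB_CTL_* names come from the driver headers and the exact snoop set manipulated here is an assumption:

    /* sketch of intel_ntb_link_enable()'s register update; NTB_CTL_* names
     * are from the driver headers, the snoop set is an assumption */
    static void link_enable_sketch(struct intel_ntb_dev *ndev)
    {
        u32 ntb_ctl;

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)       /* split mode adds a BAR5 window */
            ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
    }
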
1029 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_mw_get_addr() local
1032 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_peer_mw_get_addr()
1035 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_peer_mw_get_addr()
1040 *base = pci_resource_start(ndev->ntb.pdev, bar) + in intel_ntb_peer_mw_get_addr()
1041 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_peer_mw_get_addr()
1044 *size = pci_resource_len(ndev->ntb.pdev, bar) - in intel_ntb_peer_mw_get_addr()
1045 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_peer_mw_get_addr()
1062 struct intel_ntb_dev *ndev; in intel_ntb_db_vector_count() local
1064 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_db_vector_count()
1066 return ndev->db_vec_count; in intel_ntb_db_vector_count()
1071 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_vector_mask() local
1073 if (db_vector < 0 || db_vector > ndev->db_vec_count) in intel_ntb_db_vector_mask()
1076 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector); in intel_ntb_db_vector_mask()
1081 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_read() local
1083 return ndev_db_read(ndev, in intel_ntb_db_read()
1084 ndev->self_mmio + in intel_ntb_db_read()
1085 ndev->self_reg->db_bell); in intel_ntb_db_read()
1090 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear() local
1092 return ndev_db_write(ndev, db_bits, in intel_ntb_db_clear()
1093 ndev->self_mmio + in intel_ntb_db_clear()
1094 ndev->self_reg->db_bell); in intel_ntb_db_clear()
1099 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_set_mask() local
1101 return ndev_db_set_mask(ndev, db_bits, in intel_ntb_db_set_mask()
1102 ndev->self_mmio + in intel_ntb_db_set_mask()
1103 ndev->self_reg->db_mask); in intel_ntb_db_set_mask()
1108 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear_mask() local
1110 return ndev_db_clear_mask(ndev, db_bits, in intel_ntb_db_clear_mask()
1111 ndev->self_mmio + in intel_ntb_db_clear_mask()
1112 ndev->self_reg->db_mask); in intel_ntb_db_clear_mask()
1119 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_addr() local
1129 ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, in intel_ntb_peer_db_addr()
1130 ndev->peer_reg->db_bell); in intel_ntb_peer_db_addr()
1141 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_set() local
1143 return ndev_db_write(ndev, db_bits, in intel_ntb_peer_db_set()
1144 ndev->peer_mmio + in intel_ntb_peer_db_set()
1145 ndev->peer_reg->db_bell); in intel_ntb_peer_db_set()
1155 struct intel_ntb_dev *ndev; in intel_ntb_spad_count() local
1157 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_spad_count()
1159 return ndev->spad_count; in intel_ntb_spad_count()
1164 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_read() local
1166 return ndev_spad_read(ndev, idx, in intel_ntb_spad_read()
1167 ndev->self_mmio + in intel_ntb_spad_read()
1168 ndev->self_reg->spad); in intel_ntb_spad_read()
1173 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_write() local
1175 return ndev_spad_write(ndev, idx, val, in intel_ntb_spad_write()
1176 ndev->self_mmio + in intel_ntb_spad_write()
1177 ndev->self_reg->spad); in intel_ntb_spad_write()
1183 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_addr() local
1185 return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr, in intel_ntb_peer_spad_addr()
1186 ndev->peer_reg->spad); in intel_ntb_peer_spad_addr()
1191 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_read() local
1193 return ndev_spad_read(ndev, sidx, in intel_ntb_peer_spad_read()
1194 ndev->peer_mmio + in intel_ntb_peer_spad_read()
1195 ndev->peer_reg->spad); in intel_ntb_peer_spad_read()
1201 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_write() local
1203 return ndev_spad_write(ndev, sidx, val, in intel_ntb_peer_spad_write()
1204 ndev->peer_mmio + in intel_ntb_peer_spad_write()
1205 ndev->peer_reg->spad); in intel_ntb_peer_spad_write()
1218 static int xeon_poll_link(struct intel_ntb_dev *ndev) in xeon_poll_link() argument
1223 ndev->reg->db_iowrite(ndev->db_link_mask, in xeon_poll_link()
1224 ndev->self_mmio + in xeon_poll_link()
1225 ndev->self_reg->db_bell); in xeon_poll_link()
1227 rc = pci_read_config_word(ndev->ntb.pdev, in xeon_poll_link()
1232 if (reg_val == ndev->lnk_sta) in xeon_poll_link()
1235 ndev->lnk_sta = reg_val; in xeon_poll_link()
1240 int xeon_link_is_up(struct intel_ntb_dev *ndev) in xeon_link_is_up() argument
1242 if (ndev->ntb.topo == NTB_TOPO_SEC) in xeon_link_is_up()
1245 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); in xeon_link_is_up()
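
xeon_poll_link() acks the link doorbell, reads the PCIe link status word from config space, and reports a change only when it differs from the cached lnk_sta; xeon_link_is_up() then answers from that cache, treating the secondary side (NTB_TOPO_SEC) as always up since it cannot observe the link register. Reconstructed in outline; the config-space offset name is taken on assumption from the driver headers:

    /* outline of xeon_poll_link(); XEON_LINK_STATUS_OFFSET is assumed to
     * be the config-space offset used by the driver */
    static int poll_link_sketch(struct intel_ntb_dev *ndev)
    {
        u16 reg_val;
        int rc;

        /* ack the link-change doorbell before sampling the status */
        ndev->reg->db_iowrite(ndev->db_link_mask,
                              ndev->self_mmio + ndev->self_reg->db_bell);

        rc = pci_read_config_word(ndev->ntb.pdev, XEON_LINK_STATUS_OFFSET,
                                  &reg_val);
        if (rc)
            return 0;

        if (reg_val == ndev->lnk_sta)
            return 0;               /* no change since last poll */

        ndev->lnk_sta = reg_val;
        return 1;                   /* link state changed */
    }
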
1248 enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_topo() argument
1269 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_bar4_split() argument
1272 dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd); in xeon_ppd_bar4_split()
1278 static int xeon_init_isr(struct intel_ntb_dev *ndev) in xeon_init_isr() argument
1280 return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT, in xeon_init_isr()
1286 static void xeon_deinit_isr(struct intel_ntb_dev *ndev) in xeon_deinit_isr() argument
1288 ndev_deinit_isr(ndev); in xeon_deinit_isr()
1291 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, in xeon_setup_b2b_mw() argument
1302 pdev = ndev->ntb.pdev; in xeon_setup_b2b_mw()
1303 mmio = ndev->self_mmio; in xeon_setup_b2b_mw()
1305 if (ndev->b2b_idx == UINT_MAX) { in xeon_setup_b2b_mw()
1308 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
1310 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); in xeon_setup_b2b_mw()
1316 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); in xeon_setup_b2b_mw()
1322 ndev->b2b_off = bar_size >> 1; in xeon_setup_b2b_mw()
1325 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
1326 --ndev->mw_count; in xeon_setup_b2b_mw()
1342 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1351 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1355 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1367 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1379 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1394 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
1411 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1416 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1418 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1424 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1430 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1438 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1443 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1445 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1451 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1457 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1466 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1475 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1488 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1510 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
1526 ndev->peer_mmio = pci_iomap(pdev, b2b_bar, in xeon_setup_b2b_mw()
1528 if (!ndev->peer_mmio) in xeon_setup_b2b_mw()
1531 ndev->peer_addr = pci_resource_start(pdev, b2b_bar); in xeon_setup_b2b_mw()
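
xeon_setup_b2b_mw() decides how much of the chosen BAR to spend on peer register access: if sharing is allowed and half the BAR still meets the minimum, b2b takes the lower half (b2b_off = bar_size >> 1); otherwise the whole BAR is consumed and mw_count drops by one; a BAR below the minimum is an error. That decision as a standalone function; the XEON_B2B_MIN_SIZE value below is a placeholder and "share" stands in for the driver's sharing knob:

    #include <stdio.h>

    #define XEON_B2B_MIN_SIZE (1ULL << 20)  /* placeholder minimum, 1 MiB */

    /* returns the b2b offset into the BAR, or -1 when the BAR is too small;
     * *lost_window is set when b2b consumes the whole BAR */
    static long long b2b_offset(unsigned long long bar_size, int share,
                                int *lost_window)
    {
        *lost_window = 0;
        if (share && XEON_B2B_MIN_SIZE <= bar_size >> 1)
            return bar_size >> 1;   /* b2b takes the lower half */
        if (XEON_B2B_MIN_SIZE <= bar_size) {
            *lost_window = 1;       /* b2b takes the whole BAR */
            return 0;
        }
        return -1;                  /* BAR too small for b2b */
    }

    int main(void)
    {
        int lost;
        long long off = b2b_offset(1ULL << 22, 1, &lost);

        printf("off=%lld lost=%d\n", off, lost);  /* off=2097152 lost=0 */
        return 0;
    }
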
1537 static int xeon_init_ntb(struct intel_ntb_dev *ndev) in xeon_init_ntb() argument
1539 struct device *dev = &ndev->ntb.pdev->dev; in xeon_init_ntb()
1543 if (ndev->bar4_split) in xeon_init_ntb()
1544 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT; in xeon_init_ntb()
1546 ndev->mw_count = XEON_MW_COUNT; in xeon_init_ntb()
1548 ndev->spad_count = XEON_SPAD_COUNT; in xeon_init_ntb()
1549 ndev->db_count = XEON_DB_COUNT; in xeon_init_ntb()
1550 ndev->db_link_mask = XEON_DB_LINK_BIT; in xeon_init_ntb()
1552 switch (ndev->ntb.topo) { in xeon_init_ntb()
1554 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1560 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
1562 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
1565 ndev->spad_count >>= 1; in xeon_init_ntb()
1566 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
1567 ndev->peer_reg = &xeon_sec_reg; in xeon_init_ntb()
1568 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
1572 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1577 ndev->spad_count >>= 1; in xeon_init_ntb()
1578 ndev->self_reg = &xeon_sec_reg; in xeon_init_ntb()
1579 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
1580 ndev->xlat_reg = &xeon_pri_xlat; in xeon_init_ntb()
1585 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
1586 ndev->peer_reg = &xeon_b2b_reg; in xeon_init_ntb()
1587 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
1589 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1590 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
1593 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count; in xeon_init_ntb()
1595 ndev->b2b_idx = b2b_mw_idx; in xeon_init_ntb()
1597 if (ndev->b2b_idx >= ndev->mw_count) { in xeon_init_ntb()
1600 b2b_mw_idx, ndev->mw_count); in xeon_init_ntb()
1605 b2b_mw_idx, ndev->b2b_idx); in xeon_init_ntb()
1607 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { in xeon_init_ntb()
1609 ndev->db_count -= 1; in xeon_init_ntb()
1612 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { in xeon_init_ntb()
1613 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
1617 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
1626 ndev->self_mmio + XEON_SPCICMD_OFFSET); in xeon_init_ntb()
1634 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in xeon_init_ntb()
1636 ndev->reg->db_iowrite(ndev->db_valid_mask, in xeon_init_ntb()
1637 ndev->self_mmio + in xeon_init_ntb()
1638 ndev->self_reg->db_mask); in xeon_init_ntb()
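
xeon_init_ntb() keys the register layout off topology: the primary uses the primary register set for itself and the secondary set for the peer, NTB_TOPO_SEC mirrors that, and the b2b topologies reach the peer through primary-style registers behind the b2b window, optionally reserving memory window b2b_idx for them. Halving spad_count outside b2b splits one scratchpad file between the two hosts. The selection in outline; the b2b window bookkeeping and errata workarounds are omitted:

    /* sketch of the register-set selection in xeon_init_ntb() */
    static int select_reg_sets(struct intel_ntb_dev *ndev)
    {
        switch (ndev->ntb.topo) {
        case NTB_TOPO_PRI:
            ndev->spad_count >>= 1; /* primary shares pads with secondary */
            ndev->self_reg = &xeon_pri_reg;
            ndev->peer_reg = &xeon_sec_reg;
            ndev->xlat_reg = &xeon_sec_xlat;
            break;
        case NTB_TOPO_SEC:
            ndev->spad_count >>= 1;
            ndev->self_reg = &xeon_sec_reg;
            ndev->peer_reg = &xeon_pri_reg;
            ndev->xlat_reg = &xeon_pri_xlat;
            break;
        case NTB_TOPO_B2B_USD:
        case NTB_TOPO_B2B_DSD:
            ndev->self_reg = &xeon_pri_reg;
            ndev->peer_reg = &xeon_b2b_reg; /* via the b2b window */
            ndev->xlat_reg = &xeon_sec_xlat;
            break;
        default:
            return -EINVAL;
        }
        return 0;
    }
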
1643 static int xeon_init_dev(struct intel_ntb_dev *ndev) in xeon_init_dev() argument
1649 pdev = ndev->ntb.pdev; in xeon_init_dev()
1673 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP; in xeon_init_dev()
1690 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP; in xeon_init_dev()
1714 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14; in xeon_init_dev()
1718 ndev->reg = &xeon_reg; in xeon_init_dev()
1724 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); in xeon_init_dev()
1726 ntb_topo_string(ndev->ntb.topo)); in xeon_init_dev()
1727 if (ndev->ntb.topo == NTB_TOPO_NONE) in xeon_init_dev()
1730 if (ndev->ntb.topo != NTB_TOPO_SEC) { in xeon_init_dev()
1731 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); in xeon_init_dev()
1733 ppd, ndev->bar4_split); in xeon_init_dev()
1740 ndev->bar4_split = hweight32(mem) == in xeon_init_dev()
1743 mem, ndev->bar4_split); in xeon_init_dev()
1746 rc = xeon_init_ntb(ndev); in xeon_init_dev()
1750 return xeon_init_isr(ndev); in xeon_init_dev()
1753 static void xeon_deinit_dev(struct intel_ntb_dev *ndev) in xeon_deinit_dev() argument
1755 xeon_deinit_isr(ndev); in xeon_deinit_dev()
1758 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) in intel_ntb_init_pci() argument
1762 pci_set_drvdata(pdev, ndev); in intel_ntb_init_pci()
1782 ndev->self_mmio = pci_iomap(pdev, 0, 0); in intel_ntb_init_pci()
1783 if (!ndev->self_mmio) { in intel_ntb_init_pci()
1787 ndev->peer_mmio = ndev->self_mmio; in intel_ntb_init_pci()
1788 ndev->peer_addr = pci_resource_start(pdev, 0); in intel_ntb_init_pci()
1802 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) in intel_ntb_deinit_pci() argument
1804 struct pci_dev *pdev = ndev->ntb.pdev; in intel_ntb_deinit_pci()
1806 if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) in intel_ntb_deinit_pci()
1807 pci_iounmap(pdev, ndev->peer_mmio); in intel_ntb_deinit_pci()
1808 pci_iounmap(pdev, ndev->self_mmio); in intel_ntb_deinit_pci()
1815 static inline void ndev_init_struct(struct intel_ntb_dev *ndev, in ndev_init_struct() argument
1818 ndev->ntb.pdev = pdev; in ndev_init_struct()
1819 ndev->ntb.topo = NTB_TOPO_NONE; in ndev_init_struct()
1820 ndev->ntb.ops = &intel_ntb_ops; in ndev_init_struct()
1822 ndev->b2b_off = 0; in ndev_init_struct()
1823 ndev->b2b_idx = UINT_MAX; in ndev_init_struct()
1825 ndev->bar4_split = 0; in ndev_init_struct()
1827 ndev->mw_count = 0; in ndev_init_struct()
1828 ndev->spad_count = 0; in ndev_init_struct()
1829 ndev->db_count = 0; in ndev_init_struct()
1830 ndev->db_vec_count = 0; in ndev_init_struct()
1831 ndev->db_vec_shift = 0; in ndev_init_struct()
1833 ndev->ntb_ctl = 0; in ndev_init_struct()
1834 ndev->lnk_sta = 0; in ndev_init_struct()
1836 ndev->db_valid_mask = 0; in ndev_init_struct()
1837 ndev->db_link_mask = 0; in ndev_init_struct()
1838 ndev->db_mask = 0; in ndev_init_struct()
1840 spin_lock_init(&ndev->db_mask_lock); in ndev_init_struct()
1846 struct intel_ntb_dev *ndev; in intel_ntb_pci_probe() local
1850 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
1851 if (!ndev) { in intel_ntb_pci_probe()
1856 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
1859 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
1863 rc = xeon_init_dev(ndev); in intel_ntb_pci_probe()
1867 ndev->ntb.ops = &intel_ntb3_ops; in intel_ntb_pci_probe()
1868 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
1872 rc = gen3_init_dev(ndev); in intel_ntb_pci_probe()
1876 ndev->ntb.ops = &intel_ntb4_ops; in intel_ntb_pci_probe()
1877 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
1881 rc = gen4_init_dev(ndev); in intel_ntb_pci_probe()
1889 ndev_reset_unsafe_flags(ndev); in intel_ntb_pci_probe()
1891 ndev->reg->poll_link(ndev); in intel_ntb_pci_probe()
1893 ndev_init_debugfs(ndev); in intel_ntb_pci_probe()
1895 rc = ntb_register_device(&ndev->ntb); in intel_ntb_pci_probe()
1904 ndev_deinit_debugfs(ndev); in intel_ntb_pci_probe()
1907 xeon_deinit_dev(ndev); in intel_ntb_pci_probe()
1909 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_probe()
1911 kfree(ndev); in intel_ntb_pci_probe()
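
intel_ntb_pci_probe() dispatches on device generation; gen1 parts take the xeon path listed here, while gen3/gen4/gen5 swap in intel_ntb3_ops or intel_ntb4_ops and their own init routines. All paths then reset the unsafe flags, prime the cached link state with one poll, create debugfs, and register with the NTB core, unwinding in reverse order on failure. A skeleton of the gen1 path:

    /* skeleton of the gen1 probe path; error handling unwinds in reverse */
    static int probe_sketch(struct pci_dev *pdev)
    {
        struct intel_ntb_dev *ndev;
        int rc;

        ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL,
                            dev_to_node(&pdev->dev));
        if (!ndev)
            return -ENOMEM;

        ndev_init_struct(ndev, pdev);

        rc = intel_ntb_init_pci(ndev, pdev);
        if (rc)
            goto err_free;

        rc = xeon_init_dev(ndev);   /* gen3/gen4 call their own init here */
        if (rc)
            goto err_pci;

        ndev_reset_unsafe_flags(ndev);
        ndev->reg->poll_link(ndev); /* prime the cached link state */
        ndev_init_debugfs(ndev);

        rc = ntb_register_device(&ndev->ntb);
        if (rc)
            goto err_register;

        return 0;

    err_register:
        ndev_deinit_debugfs(ndev);
        xeon_deinit_dev(ndev);
    err_pci:
        intel_ntb_deinit_pci(ndev);
    err_free:
        kfree(ndev);
        return rc;
    }
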
1918 struct intel_ntb_dev *ndev = pci_get_drvdata(pdev); in intel_ntb_pci_remove() local
1920 ntb_unregister_device(&ndev->ntb); in intel_ntb_pci_remove()
1921 ndev_deinit_debugfs(ndev); in intel_ntb_pci_remove()
1924 xeon_deinit_dev(ndev); in intel_ntb_pci_remove()
1925 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_remove()
1926 kfree(ndev); in intel_ntb_pci_remove()