Lines matching "flip", "-" and "chip" in the Linux qib InfiniBand driver's send/PIO-buffer code (drivers/infiniband/hw/qib/qib_tx.c), grouped below by the function each match falls in.

In qib_disarm_piobufs() - cancel a range of PIO buffers:

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __clear_bit(i, dd->pio_need_disarm);
        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

In qib_disarm_piobufs_ifneeded():

        struct qib_devdata *dd = rcd->dd;
        last = rcd->pio_base + rcd->piocnt;
        if (rcd->user_event_mask) {
                clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
                for (i = 1; i < rcd->subctxt_cnt; i++)
                        clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                  &rcd->user_event_mask[i]);
        }
        spin_lock_irq(&dd->pioavail_lock);
        for (i = rcd->pio_base; i < last; i++) {
                if (__test_and_clear_bit(i, dd->pio_need_disarm))
                        dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
        }
        spin_unlock_irq(&dd->pioavail_lock);
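
Both disarm paths above follow the same pattern: a per-buffer "needs disarm" bitmap guarded by pioavail_lock, where any error path may mark a buffer and whoever next holds the lock performs the disarm and clears the mark. A minimal user-space sketch of that bookkeeping (a pthread mutex standing in for the spinlock, a stub standing in for dd->f_sendctrl; the names are illustrative, not the driver's API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t need_disarm;            /* one bit per send buffer */

/* stand-in for dd->f_sendctrl(..., QIB_SENDCTRL_DISARM_BUF(i)) */
static void disarm_buf(unsigned i)
{
        printf("disarm buffer %u\n", i);
}

/* mark a buffer so that the next owner disarms it before reuse */
static void mark_need_disarm(unsigned i)
{
        pthread_mutex_lock(&avail_lock);
        need_disarm |= 1ull << i;
        pthread_mutex_unlock(&avail_lock);
}

/* disarm any marked buffers in [first, last); clears marks as it goes */
static void disarm_ifneeded(unsigned first, unsigned last)
{
        pthread_mutex_lock(&avail_lock);
        for (unsigned i = first; i < last; i++) {
                if (need_disarm & (1ull << i)) {
                        need_disarm &= ~(1ull << i);
                        disarm_buf(i);
                }
        }
        pthread_mutex_unlock(&avail_lock);
}

int main(void)
{
        mark_need_disarm(3);
        mark_need_disarm(7);
        disarm_ifneeded(0, 16);         /* disarms 3 and 7 exactly once */
        disarm_ifneeded(0, 16);         /* nothing left to do */
        return 0;
}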

In is_sdma_buf():

        for (pidx = 0; pidx < dd->num_pports; pidx++) {
                ppd = dd->pport + pidx;
                if (i >= ppd->sdma_state.first_sendbuf &&
                    i < ppd->sdma_state.last_sendbuf)

In find_ctxt():

        spin_lock(&dd->uctxt_lock);
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                rcd = dd->rcd[ctxt];
                if (!rcd || bufn < rcd->pio_base ||
                    bufn >= rcd->pio_base + rcd->piocnt)
                        continue;
                if (rcd->user_event_mask) {
                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                &rcd->user_event_mask[0]);
                        for (i = 1; i < rcd->subctxt_cnt; i++)
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[i]);
                }
        }
        spin_unlock(&dd->uctxt_lock);

In qib_disarm_piobufs_set():

        for (i = 0; i < dd->num_pports; i++)
                pppd[i] = NULL;
        pppd[ppd->port] = ppd;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        if (test_bit(i, dd->pio_writing) ||
            (!test_bit(i << 1, dd->pioavailkernel) &&
             find_ctxt(dd, i)))
                __set_bit(i, dd->pio_need_disarm);
        else
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        for (i = 0; i < dd->num_pports; i++)
                if (pppd[i])
                        qib_cancel_sends(pppd[i]);
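
qib_disarm_piobufs_set() decides, under pioavail_lock, whether a faulted buffer can be disarmed immediately: if the kernel is still writing it (pio_writing), or it is not kernel-owned and is found in a user context, only the need-disarm bit is set and the real disarm is deferred to qib_sendbuf_done() or to the user process calling back in. A hedged sketch of just that decision, with plain bitmaps standing in for the driver's state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for dd->pio_writing, dd->pioavailkernel and
 * dd->pio_need_disarm */
static uint64_t writing, kernel_owned, need_disarm;

static bool owned_by_user_ctxt(unsigned i)      /* stands in for find_ctxt() */
{
        return !(kernel_owned & (1ull << i));
}

static void disarm_buf(unsigned i)
{
        printf("disarm buffer %u now\n", i);
}

/* one buffer reported in error: disarm now if that is safe, otherwise
 * just remember that it still needs a disarm */
static void disarm_or_defer(unsigned i)
{
        if ((writing & (1ull << i)) || owned_by_user_ctxt(i))
                need_disarm |= 1ull << i;       /* defer */
        else
                disarm_buf(i);                  /* safe: disarm immediately */
}

int main(void)
{
        kernel_owned = 0x0f;    /* buffers 0..3 belong to the kernel */
        writing = 1ull << 2;    /* buffer 2 is being written right now */

        disarm_or_defer(1);     /* kernel-owned and idle: disarmed now */
        disarm_or_defer(2);     /* kernel-owned but busy: deferred */
        disarm_or_defer(9);     /* user-owned: deferred */
        printf("need_disarm mask: 0x%llx\n", (unsigned long long)need_disarm);
        return 0;
}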

In update_send_bufs() - update shadow copy of the PIO availability map:

        const unsigned piobregs = dd->pioavregs;

        if (!dd->pioavailregs_dma)
                return;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        piov = le64_to_cpu(dd->pioavailregs_dma[i]);
        pchg = dd->pioavailkernel[i] &
                ~(dd->pioavailshadow[i] ^ piov);
        if (pchg && (pchbusy & dd->pioavailshadow[i])) {
                pnew = dd->pioavailshadow[i] & ~pchbusy;
                dd->pioavailshadow[i] = pnew;
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
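
The shadow update above works a whole 64-bit register at a time: wherever a kernel-owned buffer's generation (check) bit in the shadow already agrees with the chip's DMA copy, the corresponding busy bit is refreshed from the DMA copy. A self-contained demonstration of that masking arithmetic, assuming the driver's layout of the check bit one position below the busy bit (the constant and function names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define BUSY_SHIFT 1    /* busy bit sits one position above the check bit */

/*
 * Refresh the busy bits of one 64-bit shadow word from the chip's DMA
 * copy, but only for kernel-owned buffers whose generation (check) bit
 * has not changed under us.
 */
static uint64_t update_shadow_word(uint64_t shadow, uint64_t dma_copy,
                                   uint64_t kernel_mask)
{
        /* check-bit positions where shadow and DMA copy agree, limited
         * to kernel-owned buffers */
        uint64_t pchg = kernel_mask & ~(shadow ^ dma_copy);
        /* the same positions, moved up to the busy bits */
        uint64_t pchbusy = pchg << BUSY_SHIFT;

        if (pchg && (pchbusy & shadow)) {
                uint64_t pnew = shadow & ~pchbusy;      /* drop stale busy bits */

                pnew |= dma_copy & pchbusy;             /* adopt the chip's view */
                shadow = pnew;
        }
        return shadow;
}

int main(void)
{
        /* buffer 0 uses bits 0 (check) and 1 (busy), buffer 1 bits 2/3, ... */
        uint64_t shadow = 0x2;  /* shadow still says buffer 0 is busy */
        uint64_t dma    = 0x0;  /* chip says not busy, check bit unchanged */
        uint64_t kernel = 0x1;  /* buffer 0 is kernel-owned */

        printf("shadow before 0x%llx, after 0x%llx\n",
               (unsigned long long)shadow,
               (unsigned long long)update_shadow_word(shadow, dma, kernel));
        return 0;
}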

In no_send_bufs():
        dd->upd_pio_shadow = 1;

In qib_getsendbuf_range():

        unsigned long *shadow = dd->pioavailshadow;

        if (!(dd->flags & QIB_PRESENT))
                return NULL;

        nbufs = last - first + 1; /* number in range to check */
        if (dd->upd_pio_shadow)
                update_send_bufs(dd);

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        if (dd->last_pio >= first && dd->last_pio <= last)
                i = dd->last_pio + 1;
        if (!first)
                nbufs = last - dd->min_kernel_pio + 1;
        if (i > last)
                i = !first ? dd->min_kernel_pio : first;
        /* flip generation bit */
        __change_bit(2 * i, shadow);
        __set_bit(i, dd->pio_writing);
        dd->last_pio = i;
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        if (i < dd->piobcnt2k)
                buf = (u32 __iomem *)(dd->pio2kbase +
                                      i * dd->palign);
        else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
                buf = (u32 __iomem *)(dd->pio4kbase +
                                      (i - dd->piobcnt2k) * dd->align4k);
        else
                buf = (u32 __iomem *)(dd->piovl15base +
                                      (i - (dd->piobcnt2k + dd->piobcnt4k)) *
                                      dd->align4k);
        dd->upd_pio_shadow = 0;
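
Allocation in qib_getsendbuf_range() treats the shadow as two bits per buffer: the busy bit (2*i+1 in the full driver) is claimed with an atomic test-and-set, the generation bit 2*i is flipped so later DMA updates can be recognized, the buffer is recorded in pio_writing, and scanning resumes just after dd->last_pio so use is spread across the range. A runnable, single-threaded sketch of that scan, with plain variables in place of the driver's shadow and locking:

#include <stdint.h>
#include <stdio.h>

static uint64_t shadow;         /* bit 2*i+1 = busy, bit 2*i = generation */
static uint64_t writing;        /* one bit per buffer being written */
static int last_alloc = -1;     /* rotate the start point, like dd->last_pio */

/* try to claim one free buffer in [first, last]; returns -1 if none */
static int get_sendbuf(unsigned first, unsigned last)
{
        unsigned nbufs = last - first + 1;
        unsigned i = first;

        if (last_alloc >= (int)first && last_alloc <= (int)last)
                i = last_alloc + 1;             /* start after the last winner */

        for (unsigned j = 0; j < nbufs; j++, i++) {
                if (i > last)
                        i = first;              /* wrap around the range */
                if (shadow & (1ull << (2 * i + 1)))
                        continue;               /* busy, keep scanning */
                shadow |= 1ull << (2 * i + 1);  /* claim: set the busy bit */
                shadow ^= 1ull << (2 * i);      /* flip the generation bit */
                writing |= 1ull << i;           /* writer owns it now */
                last_alloc = (int)i;
                return (int)i;
        }
        return -1;                              /* nothing free in the range */
}

int main(void)
{
        shadow |= 1ull << (2 * 0 + 1);          /* pretend buffer 0 is busy */
        printf("got %d\n", get_sendbuf(0, 7));  /* skips 0, returns 1 */
        printf("got %d\n", get_sendbuf(0, 7));  /* continues after 1, returns 2 */
        return 0;
}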

In qib_sendbuf_done():

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __clear_bit(n, dd->pio_writing);
        if (__test_and_clear_bit(n, dd->pio_need_disarm))
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
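
qib_sendbuf_done() is the completion half of the deferred-disarm handshake: once the writer is finished, the writing bit is dropped and any disarm that had to wait is carried out at once. A short sketch in the same plain-bitmap style as above (not the driver's API):

#include <stdint.h>
#include <stdio.h>

static uint64_t writing;        /* buffers currently being written */
static uint64_t need_disarm;    /* disarms deferred while writing */

static void disarm_buf(unsigned n)
{
        printf("late disarm of buffer %u\n", n);
}

/* caller is done writing buffer n; perform any disarm that was deferred */
static void sendbuf_done(unsigned n)
{
        writing &= ~(1ull << n);
        if (need_disarm & (1ull << n)) {
                need_disarm &= ~(1ull << n);
                disarm_buf(n);
        }
}

int main(void)
{
        writing = need_disarm = 1ull << 5;  /* buffer 5: in flight, disarm pending */
        sendbuf_done(5);                    /* performs the deferred disarm now */
        return 0;
}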

In qib_chg_pioavailkernel() - change which send buffers are available for kernel:

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        /*
         * We can't use the bitmap functions on the full dma array
         * because it is always little-endian, so we have to flip to
         * host-order first; it is always 64 bits per register in chip.
         */
        if (avail) {
                __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
                            dd->pioavailshadow);
                dma = (unsigned long)
                        le64_to_cpu(dd->pioavailregs_dma[i]);
                if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                              start) % BITS_PER_LONG, &dma))
                        __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                                  start, dd->pioavailshadow);
                else
                        __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
                                    + start, dd->pioavailshadow);
                __set_bit(start, dd->pioavailkernel);
                if ((start >> 1) < dd->min_kernel_pio)
                        dd->min_kernel_pio = start >> 1;
        } else {
                __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                          dd->pioavailshadow);
                __clear_bit(start, dd->pioavailkernel);
                if ((start >> 1) > dd->min_kernel_pio)
                        dd->min_kernel_pio = start >> 1;
        }

        if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
                dd->last_pio = dd->min_kernel_pio - 1;
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        dd->f_txchk_change(dd, ostart, len, avail, rcd);
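
The awkward part of qib_chg_pioavailkernel() is that the DMA copy of the availability map is little-endian regardless of host byte order, so each word has to be converted (le64_to_cpu in the kernel) before individual bits can be tested with the ordinary bit helpers. A standalone illustration of the same conversion, assembling the value from bytes so it behaves identically on any host (function names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for le64_to_cpu(): read 8 bytes as little-endian */
static uint64_t le64_to_host(const uint8_t b[8])
{
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)
                v = (v << 8) | b[i];
        return v;
}

static bool test_bit64(unsigned nr, uint64_t word)
{
        return (word >> nr) & 1;
}

int main(void)
{
        /* a little-endian register image with only bit 9 set (0x0200) */
        uint8_t dma_word[8] = { 0x00, 0x02, 0, 0, 0, 0, 0, 0 };
        uint64_t host = le64_to_host(dma_word);

        /* only after conversion is "bit 9" the same bit on every host */
        printf("bit 9 is %s\n", test_bit64(9, host) ? "set" : "clear");
        return 0;
}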

In qib_cancel_sends():

        /*
         * ... launch fifo.  The cancel is superfluous on some chip
         * versions, but ...  PIOAvail bits are updated by the chip as
         * if a normal send had happened.
         */
        struct qib_devdata *dd = ppd->dd;

        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                spin_lock_irqsave(&dd->uctxt_lock, flags);
                rcd = dd->rcd[ctxt];
                if (rcd && rcd->ppd == ppd) {
                        last = rcd->pio_base + rcd->piocnt;
                        if (rcd->user_event_mask) {
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[0]);
                                for (i = 1; i < rcd->subctxt_cnt; i++)
                                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                                &rcd->user_event_mask[i]);
                        }
                        i = rcd->pio_base;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        spin_lock_irqsave(&dd->pioavail_lock, flags);
                        for (; i < last; i++)
                                __set_bit(i, dd->pio_need_disarm);
                        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
                } else
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
        }

        if (!(dd->flags & QIB_HAS_SEND_DMA))
                dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
                                    QIB_SENDCTRL_FLUSH);
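
qib_cancel_sends() walks every user context on the port, sets the disarm event in the context's user_event_mask so the owning process will disarm before reusing its buffers, and marks that context's whole PIO range as needing disarm. A compact sketch of the per-context marking, with a small struct standing in for struct qib_ctxtdata and a single bit standing in for _QIB_EVENT_DISARM_BUFS_BIT:

#include <stdint.h>
#include <stdio.h>

#define EVENT_DISARM_BUFS (1u << 0)     /* stands in for _QIB_EVENT_DISARM_BUFS_BIT */

struct ctxt {                           /* stands in for struct qib_ctxtdata */
        unsigned pio_base;              /* first PIO buffer owned by the context */
        unsigned piocnt;                /* number of buffers it owns */
        unsigned event_mask;            /* events the user process must handle */
};

static uint64_t need_disarm;            /* one bit per PIO buffer */

/* flag a context and remember that all of its buffers need a disarm */
static void cancel_ctxt_sends(struct ctxt *rcd)
{
        rcd->event_mask |= EVENT_DISARM_BUFS;
        for (unsigned i = rcd->pio_base; i < rcd->pio_base + rcd->piocnt; i++)
                need_disarm |= 1ull << i;
}

int main(void)
{
        struct ctxt rcd = { .pio_base = 8, .piocnt = 4 };

        cancel_ctxt_sends(&rcd);
        printf("event mask 0x%x, need_disarm 0x%llx\n",
               rcd.event_mask, (unsigned long long)need_disarm);
        return 0;
}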

In qib_force_pio_avail_update():
        /* Force an update of the in-memory copy of the pioavail
         * registers.  This is a per-device operation, so just the
         * first port. */
        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

In qib_hol_down():
        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                qib_cancel_sends(ppd);

In qib_hol_init():
        if (ppd->hol_state != QIB_HOL_INIT) {
                ppd->hol_state = QIB_HOL_INIT;
                mod_timer(&ppd->hol_timer,

In qib_hol_up():
        ppd->hol_state = QIB_HOL_UP;

In qib_hol_event():
        if (!(ppd->dd->flags & QIB_INITTED))
                return;
        if (ppd->hol_state != QIB_HOL_UP)
                mod_timer(&ppd->hol_timer,
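
The qib_hol_*() helpers form a small head-of-line-blocking state machine: the port state moves between INIT and UP as the link changes, and a timer stays armed while the state is not UP; once the link is up the timer path does nothing. A minimal state-machine sketch of that re-arm decision (no real kernel timer, and the names are stand-ins rather than the driver's types):

#include <stdbool.h>
#include <stdio.h>

enum hol_state { HOL_INIT, HOL_UP };    /* stand-ins for QIB_HOL_INIT / QIB_HOL_UP */

struct port {
        enum hol_state hol_state;
        bool timer_armed;
};

static void hol_init(struct port *p)    /* link reached INIT (or better) */
{
        if (p->hol_state != HOL_INIT) {
                p->hol_state = HOL_INIT;
                p->timer_armed = true;  /* like mod_timer(&ppd->hol_timer, ...) */
        }
}

static void hol_up(struct port *p)      /* link is fully up */
{
        p->hol_state = HOL_UP;
}

static void hol_event(struct port *p)   /* what the timer decides when it fires */
{
        if (p->hol_state != HOL_UP)
                p->timer_armed = true;  /* keep re-arming until the link is up */
        else
                p->timer_armed = false; /* link is up: the timer becomes a no-op */
}

int main(void)
{
        struct port p = { .hol_state = HOL_UP, .timer_armed = false };

        hol_init(&p);
        hol_event(&p);
        printf("state %d, timer %s\n", p.hol_state, p.timer_armed ? "armed" : "idle");
        hol_up(&p);
        hol_event(&p);
        printf("state %d, timer %s\n", p.hol_state, p.timer_armed ? "armed" : "idle");
        return 0;
}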