Lines Matching full:cp (search hits for cp, the struct rds_conn_path pointer used throughout the Linux kernel's RDS send path, net/rds/send.c)

65 void rds_send_path_reset(struct rds_conn_path *cp) in rds_send_path_reset() argument
70 if (cp->cp_xmit_rm) { in rds_send_path_reset()
71 rm = cp->cp_xmit_rm; in rds_send_path_reset()
72 cp->cp_xmit_rm = NULL; in rds_send_path_reset()
81 cp->cp_xmit_sg = 0; in rds_send_path_reset()
82 cp->cp_xmit_hdr_off = 0; in rds_send_path_reset()
83 cp->cp_xmit_data_off = 0; in rds_send_path_reset()
84 cp->cp_xmit_atomic_sent = 0; in rds_send_path_reset()
85 cp->cp_xmit_rdma_sent = 0; in rds_send_path_reset()
86 cp->cp_xmit_data_sent = 0; in rds_send_path_reset()
88 cp->cp_conn->c_map_queued = 0; in rds_send_path_reset()
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_path_reset()
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_path_reset()
94 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_reset()
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_reset()
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); in rds_send_path_reset()
100 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_reset()
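
The rds_send_path_reset() hits above show the reset pattern: forget the message that was mid-transmit, rewind the header/sg/data cursors and the unacked budgets, then splice everything still on cp_retrans back onto the front of cp_send_queue under cp_lock so it is transmitted again. Below is a minimal userspace sketch of that splice-under-lock step, assuming invented msg/send_path types with a singly linked list and a pthread mutex in place of the kernel's list_head and spinlock.

/*
 * Illustrative stand-ins only, not the kernel types: reset the transmit
 * cursors, then move the whole retransmit list to the FRONT of the send
 * queue while holding the path lock.
 */
#include <pthread.h>
#include <stddef.h>

struct msg {
    unsigned long long seq;
    struct msg *next;
};

struct send_path {
    pthread_mutex_t lock;                    /* cp_lock analogue          */
    struct msg *send_head, *send_tail;       /* cp_send_queue analogue    */
    struct msg *retrans_head, *retrans_tail; /* cp_retrans analogue       */
    struct msg *xmit_msg;                    /* cp_xmit_rm: in-flight msg */
    unsigned int xmit_sg, xmit_hdr_off, xmit_data_off; /* send cursors    */
};

static void path_reset(struct send_path *p)
{
    /* Drop the partially transmitted message and rewind the cursors. */
    p->xmit_msg = NULL;
    p->xmit_sg = p->xmit_hdr_off = p->xmit_data_off = 0;

    pthread_mutex_lock(&p->lock);
    if (p->retrans_head) {
        /* Put the whole retransmit list ahead of anything not yet sent. */
        p->retrans_tail->next = p->send_head;
        p->send_head = p->retrans_head;
        if (!p->send_tail)
            p->send_tail = p->retrans_tail;
        p->retrans_head = p->retrans_tail = NULL;
    }
    pthread_mutex_unlock(&p->lock);
}

Splicing to the head rather than the tail keeps the retransmitted messages, which carry older sequence numbers, ahead of anything queued later.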
104 static int acquire_in_xmit(struct rds_conn_path *cp) in acquire_in_xmit() argument
106 return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0; in acquire_in_xmit()
109 static void release_in_xmit(struct rds_conn_path *cp) in release_in_xmit() argument
111 clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags); in release_in_xmit()
118 if (waitqueue_active(&cp->cp_waitq)) in release_in_xmit()
119 wake_up_all(&cp->cp_waitq); in release_in_xmit()
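
acquire_in_xmit()/release_in_xmit() gate the transmit loop with a single atomic bit (RDS_IN_XMIT) taken with acquire semantics and dropped with release semantics, waking anyone parked on cp_waitq when it is dropped. The following is a hedged C11 analogue of that gate; xmit_gate and its fields are invented names, and a pthread condition variable stands in for the kernel waitqueue.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct xmit_gate {
    atomic_bool in_xmit;       /* the RDS_IN_XMIT bit analogue */
    pthread_mutex_t wait_lock; /* protects the waitqueue       */
    pthread_cond_t waitq;      /* cp_waitq stand-in            */
};

/* Try to become the (single) transmitter; never blocks. */
static bool xmit_gate_tryacquire(struct xmit_gate *g)
{
    /* acquire ordering: the winner must see all prior queue updates */
    return !atomic_exchange_explicit(&g->in_xmit, true, memory_order_acquire);
}

static void xmit_gate_release(struct xmit_gate *g)
{
    /* release ordering: everything we transmitted is visible first */
    atomic_store_explicit(&g->in_xmit, false, memory_order_release);

    /* Wake senders that backed off while the gate was held. */
    pthread_mutex_lock(&g->wait_lock);
    pthread_cond_broadcast(&g->waitq);
    pthread_mutex_unlock(&g->wait_lock);
}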
136 int rds_send_xmit(struct rds_conn_path *cp) in rds_send_xmit() argument
138 struct rds_connection *conn = cp->cp_conn; in rds_send_xmit()
159 if (!acquire_in_xmit(cp)) { in rds_send_xmit()
165 if (rds_destroy_pending(cp->cp_conn)) { in rds_send_xmit()
166 release_in_xmit(cp); in rds_send_xmit()
179 send_gen = READ_ONCE(cp->cp_send_gen) + 1; in rds_send_xmit()
180 WRITE_ONCE(cp->cp_send_gen, send_gen); in rds_send_xmit()
186 if (!rds_conn_path_up(cp)) { in rds_send_xmit()
187 release_in_xmit(cp); in rds_send_xmit()
193 conn->c_trans->xmit_path_prepare(cp); in rds_send_xmit()
201 rm = cp->cp_xmit_rm; in rds_send_xmit()
225 rm->m_inc.i_conn_path = cp; in rds_send_xmit()
226 rm->m_inc.i_conn = cp->cp_conn; in rds_send_xmit()
228 cp->cp_xmit_rm = rm; in rds_send_xmit()
251 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
253 if (!list_empty(&cp->cp_send_queue)) { in rds_send_xmit()
254 rm = list_entry(cp->cp_send_queue.next, in rds_send_xmit()
264 &cp->cp_retrans); in rds_send_xmit()
267 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
282 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
285 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
291 if (cp->cp_unacked_packets == 0 || in rds_send_xmit()
292 cp->cp_unacked_bytes < len) { in rds_send_xmit()
295 cp->cp_unacked_packets = in rds_send_xmit()
297 cp->cp_unacked_bytes = in rds_send_xmit()
301 cp->cp_unacked_bytes -= len; in rds_send_xmit()
302 cp->cp_unacked_packets--; in rds_send_xmit()
305 cp->cp_xmit_rm = rm; in rds_send_xmit()
309 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
321 cp->cp_xmit_rdma_sent = 1; in rds_send_xmit()
325 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { in rds_send_xmit()
337 cp->cp_xmit_atomic_sent = 1; in rds_send_xmit()
363 if (rm->data.op_active && !cp->cp_xmit_data_sent) { in rds_send_xmit()
367 cp->cp_xmit_hdr_off, in rds_send_xmit()
368 cp->cp_xmit_sg, in rds_send_xmit()
369 cp->cp_xmit_data_off); in rds_send_xmit()
373 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
376 cp->cp_xmit_hdr_off); in rds_send_xmit()
377 cp->cp_xmit_hdr_off += tmp; in rds_send_xmit()
381 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
384 cp->cp_xmit_data_off); in rds_send_xmit()
385 cp->cp_xmit_data_off += tmp; in rds_send_xmit()
387 if (cp->cp_xmit_data_off == sg->length) { in rds_send_xmit()
388 cp->cp_xmit_data_off = 0; in rds_send_xmit()
390 cp->cp_xmit_sg++; in rds_send_xmit()
391 BUG_ON(ret != 0 && cp->cp_xmit_sg == in rds_send_xmit()
396 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
397 (cp->cp_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
398 cp->cp_xmit_data_sent = 1; in rds_send_xmit()
406 if (!rm->data.op_active || cp->cp_xmit_data_sent) { in rds_send_xmit()
407 cp->cp_xmit_rm = NULL; in rds_send_xmit()
408 cp->cp_xmit_sg = 0; in rds_send_xmit()
409 cp->cp_xmit_hdr_off = 0; in rds_send_xmit()
410 cp->cp_xmit_data_off = 0; in rds_send_xmit()
411 cp->cp_xmit_rdma_sent = 0; in rds_send_xmit()
412 cp->cp_xmit_atomic_sent = 0; in rds_send_xmit()
413 cp->cp_xmit_data_sent = 0; in rds_send_xmit()
421 conn->c_trans->xmit_path_complete(cp); in rds_send_xmit()
422 release_in_xmit(cp); in rds_send_xmit()
451 raced = send_gen != READ_ONCE(cp->cp_send_gen); in rds_send_xmit()
454 !list_empty(&cp->cp_send_queue)) && !raced) { in rds_send_xmit()
458 if (rds_destroy_pending(cp->cp_conn)) in rds_send_xmit()
461 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_xmit()
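
The tail of rds_send_xmit() (lines 179/180 and 451-461 above) pairs the gate with a generation counter: the transmitter bumps cp_send_gen while it holds RDS_IN_XMIT, and after releasing the gate it re-queues cp_send_w only if messages are still pending, no newer transmit pass has bumped the generation in the meantime, and the connection is not being destroyed. A compact, self-contained sketch of that check follows; every name is invented and C11 atomics replace the kernel bitops, READ_ONCE/WRITE_ONCE and queue_delayed_work().

#include <stdatomic.h>
#include <stdbool.h>

struct tx_path {
    atomic_bool in_xmit;         /* RDS_IN_XMIT analogue               */
    atomic_uint send_gen;        /* cp_send_gen analogue               */
    atomic_uint queued;          /* number of messages waiting         */
    atomic_bool destroy_pending; /* connection being torn down?        */
    atomic_bool rearm_worker;    /* stands in for queue_delayed_work() */
};

static void send_pass(struct tx_path *p)
{
    unsigned int my_gen;

    /* Only one transmitter at a time. */
    if (atomic_exchange_explicit(&p->in_xmit, true, memory_order_acquire))
        return;

    /* Claim this pass: any later pass publishes a larger generation. */
    my_gen = atomic_fetch_add(&p->send_gen, 1) + 1;

    /* ... drain p->queued here, transmitting messages ... */

    atomic_store_explicit(&p->in_xmit, false, memory_order_release);

    /*
     * Messages queued after we stopped looking would otherwise be stranded.
     * Re-arm the worker only if work remains, nobody has started a newer
     * pass (generation unchanged), and we are not shutting down.
     */
    if (atomic_load(&p->queued) != 0 &&
        my_gen == atomic_load(&p->send_gen) &&
        !atomic_load(&p->destroy_pending))
        atomic_store(&p->rearm_worker, true);
}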
685 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, in rds_send_path_drop_acked() argument
692 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_drop_acked()
694 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_drop_acked()
706 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_drop_acked()
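
rds_send_path_drop_acked() walks cp_retrans under cp_lock and peels off every message whose sequence number the peer has acknowledged; the completion and freeing happen after the lock is released. Here is a minimal sketch of that collect-under-lock, complete-outside-lock pattern, again with illustrative types: a singly linked list and a pthread mutex instead of the kernel's list_head and spinlock.

#include <pthread.h>
#include <stdlib.h>

struct rmsg {
    unsigned long long seq;
    struct rmsg *next;
};

struct retrans_path {
    pthread_mutex_t lock;   /* cp_lock analogue    */
    struct rmsg *retrans;   /* cp_retrans analogue */
};

static void drop_acked(struct retrans_path *p, unsigned long long ack)
{
    struct rmsg *done = NULL;   /* work to finish once the lock is dropped */
    struct rmsg **pp, *m;

    pthread_mutex_lock(&p->lock);
    pp = &p->retrans;
    while ((m = *pp) != NULL) {
        if (m->seq <= ack) {
            *pp = m->next;      /* unlink: the peer has this message */
            m->next = done;     /* park it on the private list       */
            done = m;
        } else {
            pp = &m->next;
        }
    }
    pthread_mutex_unlock(&p->lock);

    /* Completion (notifying the sender, freeing) runs unlocked. */
    while ((m = done) != NULL) {
        done = m->next;
        free(m);
    }
}

Finishing the acked messages outside the lock keeps the hold time short, which matters because the same cp_lock also serializes the enqueue path shown below in rds_send_queue_rm().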
725 struct rds_conn_path *cp; in rds_send_drop_to() local
756 cp = rm->m_inc.i_conn_path; in rds_send_drop_to()
758 cp = &conn->c_path[0]; in rds_send_drop_to()
760 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_drop_to()
767 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
771 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
818 struct rds_conn_path *cp, in rds_send_queue_rm() argument
863 rm->m_inc.i_conn_path = cp; in rds_send_queue_rm()
866 spin_lock(&cp->cp_lock); in rds_send_queue_rm()
867 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); in rds_send_queue_rm()
868 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_queue_rm()
870 spin_unlock(&cp->cp_lock); in rds_send_queue_rm()
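
In rds_send_queue_rm() the key point is that stamping h_sequence from cp_next_tx_seq++ and appending to cp_send_queue happen under the same cp_lock, so the on-wire sequence always matches queue order. A small illustrative analogue, assuming invented names and a pthread mutex in place of cp_lock:

#include <pthread.h>
#include <stddef.h>

struct qmsg {
    unsigned long long seq;   /* h_sequence analogue */
    struct qmsg *next;
};

struct tx_queue {
    pthread_mutex_t lock;           /* cp_lock        */
    unsigned long long next_tx_seq; /* cp_next_tx_seq */
    struct qmsg *head, *tail;       /* cp_send_queue  */
};

static void queue_msg(struct tx_queue *q, struct qmsg *m)
{
    pthread_mutex_lock(&q->lock);
    m->seq = q->next_tx_seq++;  /* stamp and advance in one critical section */
    m->next = NULL;
    if (q->tail)
        q->tail->next = m;
    else
        q->head = m;
    q->tail = m;
    pthread_mutex_unlock(&q->lock);
}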
1421 rds_send_probe(struct rds_conn_path *cp, __be16 sport, in rds_send_probe() argument
1434 rm->m_daddr = cp->cp_conn->c_faddr; in rds_send_probe()
1437 rds_conn_path_connect_if_down(cp); in rds_send_probe()
1439 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); in rds_send_probe()
1443 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_probe()
1444 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_probe()
1447 rm->m_inc.i_conn = cp->cp_conn; in rds_send_probe()
1448 rm->m_inc.i_conn_path = cp; in rds_send_probe()
1451 cp->cp_next_tx_seq); in rds_send_probe()
1453 cp->cp_next_tx_seq++; in rds_send_probe()
1456 cp->cp_conn->c_trans->t_mp_capable) { in rds_send_probe()
1458 u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); in rds_send_probe()
1468 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_probe()
1475 if (!rds_destroy_pending(cp->cp_conn)) in rds_send_probe()
1476 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_probe()
1489 rds_send_pong(struct rds_conn_path *cp, __be16 dport) in rds_send_pong() argument
1491 return rds_send_probe(cp, 0, dport, 0); in rds_send_pong()
1498 struct rds_conn_path *cp = &conn->c_path[cp_index]; in rds_send_ping() local
1500 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_ping()
1502 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1506 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1507 rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0); in rds_send_ping()
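
The rds_send_ping() hits close the listing: one branch unlocks cp_lock and returns early, the other unlocks and then calls rds_send_probe() with RDS_FLAG_PROBE_PORT as the source port, which suggests a one-shot trigger tested and set under cp_lock so the handshake ping goes out only once per connection. A hedged sketch of that shape; conn, ping_sent and send_probe() are invented names and the probe body is stubbed out.

#include <pthread.h>
#include <stdbool.h>

struct conn {
    pthread_mutex_t lock;   /* cp_lock analogue                   */
    bool ping_sent;         /* set the first time a ping goes out */
};

static void send_probe(struct conn *c)
{
    (void)c;                /* building/queueing the probe is elided */
}

static void send_ping_once(struct conn *c)
{
    pthread_mutex_lock(&c->lock);
    if (c->ping_sent) {                /* already sent: early unlock/return */
        pthread_mutex_unlock(&c->lock);
        return;
    }
    c->ping_sent = true;               /* claim the one-shot under the lock */
    pthread_mutex_unlock(&c->lock);

    send_probe(c);                     /* the actual send happens unlocked */
}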