Lines Matching full:rm

48 static void rds_ib_send_complete(struct rds_message *rm,  in rds_ib_send_complete()  argument
50 void (*complete)(struct rds_message *rm, int status)) in rds_ib_send_complete() argument
70 complete(rm, notify_status); in rds_ib_send_complete()
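
The three hits above (lines 48, 50, and 70) trace one pattern in rds_ib_send_complete(): the caller hands in a completion callback, the function translates the hardware work-completion status into an RDS notification status, and the callback is invoked with that result. The entries in this listing appear to come from the RDS-over-InfiniBand send path (net/rds/ib_send.c in the Linux kernel). A minimal standalone sketch of that callback-dispatch pattern follows; the enum values, the rds_msg type, and notify_done() are stand-ins, not the kernel's definitions.

/*
 * Minimal sketch of the pattern at lines 48-70: translate a work
 * completion status into a notification status and hand it to a
 * caller-supplied callback. All names here are stand-ins.
 */
#include <stdio.h>

enum wc_status  { WC_SUCCESS = 0, WC_FLUSH_ERR = 5 };
enum rds_status { RDS_SENT_OK = 0, RDS_SEND_DROPPED = 1 };

struct rds_msg { int id; };             /* stand-in for struct rds_message */

static void send_complete(struct rds_msg *rm, int wc_status,
                          void (*complete)(struct rds_msg *rm, int status))
{
        int notify_status = (wc_status == WC_SUCCESS) ? RDS_SENT_OK
                                                      : RDS_SEND_DROPPED;
        complete(rm, notify_status);    /* mirrors the call at line 70 */
}

static void notify_done(struct rds_msg *rm, int status)
{
        printf("msg %d completed, status %d\n", rm->id, status);
}

int main(void)
{
        struct rds_msg m = { .id = 1 };

        send_complete(&m, WC_FLUSH_ERR, notify_done);
        return 0;
}
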
146 * Returns the rm for no good reason other than it is unobtainable
154 struct rds_message *rm = NULL; in rds_ib_send_unmap_op() local
160 rm = container_of(send->s_op, struct rds_message, data); in rds_ib_send_unmap_op()
167 rm = container_of(send->s_op, struct rds_message, rdma); in rds_ib_send_unmap_op()
174 rm = container_of(send->s_op, struct rds_message, atomic); in rds_ib_send_unmap_op()
187 return rm; in rds_ib_send_unmap_op()
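
Lines 154-187 show rds_ib_send_unmap_op() recovering the owning rds_message from whichever operation (data, rdma, or atomic) is embedded in it, by applying container_of() to the matching member and returning the result at line 187. A self-contained sketch of that container_of() recovery, with an illustrative struct layout rather than the kernel's:

/*
 * Sketch of the container_of() pattern at lines 160-174: given a
 * pointer to an operation embedded inside a message, step back to
 * the enclosing message. The struct layout is illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct data_op { int nents; };

struct msg {
        int seq;
        struct data_op data;            /* embedded op, like rm->data */
};

int main(void)
{
        struct msg m = { .seq = 42 };
        struct data_op *op = &m.data;

        /* recover the owning message from the embedded op */
        struct msg *owner = container_of(op, struct msg, data);

        printf("owner seq = %d\n", owner->seq);
        return 0;
}
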
245 struct rds_message *rm = NULL; in rds_ib_send_cqe_handler() local
276 rm = rds_ib_send_unmap_op(ic, send, wc->status); in rds_ib_send_cqe_handler()
282 if (send->s_op == rm->m_final_op) { in rds_ib_send_cqe_handler()
286 rds_message_unmapped(rm); in rds_ib_send_cqe_handler()
288 rds_message_put(rm); in rds_ib_send_cqe_handler()
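
In rds_ib_send_cqe_handler() (lines 245-288), the unmap helper above returns the message, and only when the completed operation is the message's final op (line 282) does the handler mark the message unmapped and drop its reference (lines 286-288). Below is a small sketch of that drop-the-reference-on-the-final-op pattern; the plain int refcount and the final_op token are simplifications, since the kernel uses proper atomic reference counting.

/*
 * Sketch of the pattern at lines 282-288: release the message
 * reference only once its final operation has completed. The int
 * refcount stands in for the kernel's atomic refcounting.
 */
#include <stdio.h>
#include <stdlib.h>

struct msg {
        int refcount;
        void *final_op;         /* which op's completion releases the ref */
};

static void msg_put(struct msg *rm)
{
        if (--rm->refcount == 0) {
                printf("last reference dropped, freeing message\n");
                free(rm);
        }
}

static void on_send_completion(struct msg *rm, void *completed_op)
{
        if (completed_op == rm->final_op)
                msg_put(rm);    /* all work for this message is done */
}

int main(void)
{
        struct msg *rm = malloc(sizeof(*rm));

        if (!rm)
                return 1;
        rm->refcount = 1;
        rm->final_op = rm;              /* arbitrary token for the demo */
        on_send_completion(rm, rm);     /* final op completes -> freed */
        return 0;
}
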
484 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, in rds_ib_xmit() argument
511 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { in rds_ib_xmit()
513 scat = &rm->data.op_sg[sg]; in rds_ib_xmit()
519 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) in rds_ib_xmit()
522 i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); in rds_ib_xmit()
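
Lines 519-522 size the send: the payload length is read out of the big-endian header and divided into RDS_FRAG_SIZE-sized fragments, rounding up, with a zero-length payload special-cased at line 519 (that branch body is not in this listing; the sketch assumes a header-only message still costs one fragment). A standalone version of the computation, with 4096 standing in for the kernel's RDS_FRAG_SIZE and ntohl() standing in for be32_to_cpu():

/*
 * Sketch of the fragment count at lines 519-522. The payload length
 * sits big-endian in the wire header; DIV_ROUND_UP splits it into
 * fragments. RDS_FRAG_SIZE here is a stand-in value.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>                  /* ntohl, htonl */

#define RDS_FRAG_SIZE 4096U
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int frag_count(uint32_t h_len_be)
{
        uint32_t len = ntohl(h_len_be); /* be32_to_cpu equivalent */

        if (len == 0)                   /* assumption: a header-only
                                         * message still needs one frag */
                return 1;
        return DIV_ROUND_UP(len, RDS_FRAG_SIZE);
}

int main(void)
{
        printf("%u\n", frag_count(htonl(10000)));       /* prints 3 */
        printf("%u\n", frag_count(htonl(0)));           /* prints 1 */
        return 0;
}
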
550 if (rm->data.op_nents) { in rds_ib_xmit()
551 rm->data.op_count = ib_dma_map_sg(dev, in rds_ib_xmit()
552 rm->data.op_sg, in rds_ib_xmit()
553 rm->data.op_nents, in rds_ib_xmit()
555 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count); in rds_ib_xmit()
556 if (rm->data.op_count == 0) { in rds_ib_xmit()
563 rm->data.op_count = 0; in rds_ib_xmit()
566 rds_message_addref(rm); in rds_ib_xmit()
567 rm->data.op_dmasg = 0; in rds_ib_xmit()
568 rm->data.op_dmaoff = 0; in rds_ib_xmit()
569 ic->i_data_op = &rm->data; in rds_ib_xmit()
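
Lines 550-569 show the data-payload setup in rds_ib_xmit(): if the message has scatter-gather entries they are DMA-mapped in one call, a mapped count of zero is treated as failure, and on success the transport takes its own reference on the message and resets the send cursor (op_dmasg/op_dmaoff) to the start. The sketch below mirrors that flow with stand-ins; map_segments() is a placeholder for ib_dma_map_sg(), and the error and refcount handling is simplified.

/*
 * Sketch of the mapping step at lines 550-569: map the scatterlist,
 * treat a zero mapped count as failure, then take a reference and
 * reset the send cursor. Types and helpers are stand-ins.
 */
#include <stdio.h>

struct data_op {
        unsigned int nents;     /* segments handed in by the caller   */
        unsigned int count;     /* segments actually mapped           */
        unsigned int dmasg;     /* send cursor: current segment...    */
        unsigned int dmaoff;    /* ...and offset within it            */
};

struct msg {
        int refcount;
        struct data_op data;
};

/* pretend DMA mapping: returns number of mapped segments, 0 on failure */
static unsigned int map_segments(unsigned int nents)
{
        return nents;                           /* always "succeeds" here */
}

static int prepare_send(struct msg *rm)
{
        if (rm->data.nents) {
                rm->data.count = map_segments(rm->data.nents);
                if (rm->data.count == 0)
                        return -1;              /* mapping failed: bail out */
        } else {
                rm->data.count = 0;             /* header-only message */
        }

        rm->refcount++;                         /* like rds_message_addref() */
        rm->data.dmasg = 0;                     /* reset the send cursor */
        rm->data.dmaoff = 0;
        return 0;
}

int main(void)
{
        struct msg m = { .refcount = 1, .data = { .nents = 3 } };

        if (prepare_send(&m) == 0)
                printf("mapped %u segments, refcount %d\n",
                       m.data.count, m.refcount);
        return 0;
}
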
572 if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags)) in rds_ib_xmit()
573 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED; in rds_ib_xmit()
574 if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) in rds_ib_xmit()
575 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; in rds_ib_xmit()
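
Lines 572-575 copy per-message state into the wire header: each flag bit on the message is tested and, if set, the corresponding header flag is ORed in. A minimal standalone version of that mapping; the bit positions and flag values below are illustrative, not the kernel's.

/*
 * Sketch of the flag propagation at lines 572-575: message state
 * bits are mirrored into the on-the-wire header flags.
 */
#include <stdio.h>

#define MSG_ACK_REQUIRED_BIT   0
#define MSG_RETRANSMITTED_BIT  1

#define HDR_FLAG_ACK_REQUIRED   0x01
#define HDR_FLAG_RETRANSMITTED  0x02

static int test_bit(int nr, const unsigned long *flags)
{
        return (*flags >> nr) & 1;
}

int main(void)
{
        unsigned long m_flags = 1UL << MSG_RETRANSMITTED_BIT;
        unsigned char h_flags = 0;

        if (test_bit(MSG_ACK_REQUIRED_BIT, &m_flags))
                h_flags |= HDR_FLAG_ACK_REQUIRED;
        if (test_bit(MSG_RETRANSMITTED_BIT, &m_flags))
                h_flags |= HDR_FLAG_RETRANSMITTED;

        printf("header flags = 0x%02x\n", h_flags);     /* 0x02 */
        return 0;
}
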
579 if (rm->rdma.op_active) { in rds_ib_xmit()
582 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey); in rds_ib_xmit()
583 rds_message_add_extension(&rm->m_inc.i_hdr, in rds_ib_xmit()
586 if (rm->m_rdma_cookie) { in rds_ib_xmit()
587 rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr, in rds_ib_xmit()
588 rds_rdma_cookie_key(rm->m_rdma_cookie), in rds_ib_xmit()
589 rds_rdma_cookie_offset(rm->m_rdma_cookie)); in rds_ib_xmit()
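
Lines 582-589 attach RDMA metadata to the outgoing header: the R_Key of an active RDMA op goes into one extension, and if the message carries an RDMA cookie, its key and offset halves are extracted with rds_rdma_cookie_key()/rds_rdma_cookie_offset() and added as a destination extension. The sketch below shows one plausible cookie encoding (key in the low 32 bits, offset in the high 32); that layout is an assumption for illustration, not something this listing shows.

/*
 * Sketch of the cookie split implied by lines 586-589: a 64-bit
 * cookie packs an R_Key and an offset, and two helpers pull the
 * halves back out. The layout chosen here is an assumption.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static uint64_t make_cookie(uint32_t rkey, uint32_t offset)
{
        return (uint64_t)rkey | ((uint64_t)offset << 32);
}

static uint32_t cookie_key(uint64_t cookie)    { return (uint32_t)cookie; }
static uint32_t cookie_offset(uint64_t cookie) { return (uint32_t)(cookie >> 32); }

int main(void)
{
        uint64_t cookie = make_cookie(0xdeadbeef, 128);

        printf("key 0x%" PRIx32 ", offset %" PRIu32 "\n",
               cookie_key(cookie), cookie_offset(cookie));
        return 0;
}
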
596 rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic)); in rds_ib_xmit()
597 rds_message_make_checksum(&rm->m_inc.i_hdr); in rds_ib_xmit()
615 if (rm->rdma.op_active && rm->rdma.op_fence) in rds_ib_xmit()
622 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg]; in rds_ib_xmit()
644 memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, in rds_ib_xmit()
650 && scat != &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
652 sg_dma_len(scat) - rm->data.op_dmaoff); in rds_ib_xmit()
656 send->s_sge[1].addr += rm->data.op_dmaoff; in rds_ib_xmit()
661 rm->data.op_dmaoff += len; in rds_ib_xmit()
662 if (rm->data.op_dmaoff == sg_dma_len(scat)) { in rds_ib_xmit()
664 rm->data.op_dmasg++; in rds_ib_xmit()
665 rm->data.op_dmaoff = 0; in rds_ib_xmit()
708 && scat != &rm->data.op_sg[rm->data.op_count]); in rds_ib_xmit()
716 if (scat == &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
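
The remaining hits (lines 622-716) are the per-fragment send loop: the cursor op_dmasg/op_dmaoff points into the DMA-mapped scatterlist, each work request copies the header (line 644) and takes at most a fragment's worth from the current segment starting at op_dmaoff (lines 650-656), the cursor then advances (lines 661-665), and the loop stops once it walks off the end of the mapped segments (lines 708-716). A compact standalone model of that cursor walk, using plain arrays in place of the mapped scatterlist and a small FRAG_SIZE so the output stays short:

/*
 * Sketch of the send cursor at lines 650-665 and 708-716: dmasg
 * indexes the current segment, dmaoff is the offset inside it, and
 * each step consumes up to one fragment before advancing.
 */
#include <stdio.h>

#define FRAG_SIZE 1024U         /* stand-in for RDS_FRAG_SIZE */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int seg_len[] = { 1500, 700, 300 };    /* sg_dma_len() values */
        unsigned int nsegs = 3;
        unsigned int dmasg = 0, dmaoff = 0;             /* the cursor */

        while (dmasg < nsegs) {
                unsigned int len = min_u(FRAG_SIZE, seg_len[dmasg] - dmaoff);

                printf("frag: seg %u, off %u, len %u\n", dmasg, dmaoff, len);

                dmaoff += len;
                if (dmaoff == seg_len[dmasg]) {         /* segment exhausted */
                        dmasg++;
                        dmaoff = 0;
                }
        }
        return 0;
}
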