Lines Matching full:req

26 	struct drbd_request *req;  in drbd_req_new()  local
28 req = mempool_alloc(&drbd_request_mempool, GFP_NOIO); in drbd_req_new()
29 if (!req) in drbd_req_new()
31 memset(req, 0, sizeof(*req)); in drbd_req_new()
33 req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) in drbd_req_new()
36 req->device = device; in drbd_req_new()
37 req->master_bio = bio_src; in drbd_req_new()
38 req->epoch = 0; in drbd_req_new()
40 drbd_clear_interval(&req->i); in drbd_req_new()
41 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
42 req->i.size = bio_src->bi_iter.bi_size; in drbd_req_new()
43 req->i.local = true; in drbd_req_new()
44 req->i.waiting = false; in drbd_req_new()
46 INIT_LIST_HEAD(&req->tl_requests); in drbd_req_new()
47 INIT_LIST_HEAD(&req->w.list); in drbd_req_new()
48 INIT_LIST_HEAD(&req->req_pending_master_completion); in drbd_req_new()
49 INIT_LIST_HEAD(&req->req_pending_local); in drbd_req_new()
52 atomic_set(&req->completion_ref, 1); in drbd_req_new()
54 kref_init(&req->kref); in drbd_req_new()
55 return req; in drbd_req_new()
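
For orientation, a minimal userspace sketch of the allocate-and-initialize pattern the drbd_req_new() lines above follow: take an object from a pool (plain malloc() stands in for the drbd_request mempool), zero it, record which sectors it covers, and start it with one completion reference and one object reference. All names here (demo_request, demo_req_new) are invented for illustration; this is not the DRBD code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	struct demo_request {
		unsigned long long sector;	/* like req->i.sector */
		unsigned int size;		/* like req->i.size, in bytes */
		bool is_write;			/* like the RQ_WRITE bit */
		atomic_int completion_ref;	/* reasons the upper layer must still wait */
		atomic_int kref;		/* holders of the object itself */
	};

	struct demo_request *demo_req_new(unsigned long long sector,
					  unsigned int size, bool is_write)
	{
		/* malloc() stands in for mempool_alloc(&drbd_request_mempool, GFP_NOIO) */
		struct demo_request *req = malloc(sizeof(*req));

		if (!req)
			return NULL;
		memset(req, 0, sizeof(*req));
		req->sector = sector;
		req->size = size;
		req->is_write = is_write;
		atomic_init(&req->completion_ref, 1);	/* dropped when the master bio completes */
		atomic_init(&req->kref, 1);		/* dropped when the object may be freed */
		return req;
	}
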
59 struct drbd_request *req) in drbd_remove_request_interval() argument
61 struct drbd_device *device = req->device; in drbd_remove_request_interval()
62 struct drbd_interval *i = &req->i; in drbd_remove_request_interval()
73 struct drbd_request *req = container_of(kref, struct drbd_request, kref); in drbd_req_destroy() local
74 struct drbd_device *device = req->device; in drbd_req_destroy()
75 const unsigned s = req->rq_state; in drbd_req_destroy()
77 if ((req->master_bio && !(s & RQ_POSTPONED)) || in drbd_req_destroy()
78 atomic_read(&req->completion_ref) || in drbd_req_destroy()
82 s, atomic_read(&req->completion_ref)); in drbd_req_destroy()
88 * req_lock, and req->tl_requests will typically be on ->transfer_log, in drbd_req_destroy()
92 * still allowed to unconditionally list_del(&req->tl_requests), in drbd_req_destroy()
94 list_del_init(&req->tl_requests); in drbd_req_destroy()
98 if (!drbd_interval_empty(&req->i)) { in drbd_req_destroy()
105 drbd_remove_request_interval(root, req); in drbd_req_destroy()
106 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0) in drbd_req_destroy()
108 s, (unsigned long long)req->i.sector, req->i.size); in drbd_req_destroy()
128 drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size); in drbd_req_destroy()
131 drbd_set_in_sync(peer_device, req->i.sector, req->i.size); in drbd_req_destroy()
146 drbd_al_complete_io(device, &req->i); in drbd_req_destroy()
151 (unsigned long long) req->i.sector, req->i.size); in drbd_req_destroy()
156 mempool_free(req, &drbd_request_mempool); in drbd_req_destroy()
193 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) in drbd_req_complete() argument
195 const unsigned s = req->rq_state; in drbd_req_complete()
196 struct drbd_device *device = req->device; in drbd_req_complete()
215 if (!req->master_bio) { in drbd_req_complete()
234 error = PTR_ERR(req->private_bio); in drbd_req_complete()
243 if (op_is_write(bio_op(req->master_bio)) && in drbd_req_complete()
244 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) in drbd_req_complete()
248 bio_end_io_acct(req->master_bio, req->start_jif); in drbd_req_complete()
265 bio_op(req->master_bio) == REQ_OP_READ && in drbd_req_complete()
266 !(req->master_bio->bi_opf & REQ_RAHEAD) && in drbd_req_complete()
267 !list_empty(&req->tl_requests)) in drbd_req_complete()
268 req->rq_state |= RQ_POSTPONED; in drbd_req_complete()
270 if (!(req->rq_state & RQ_POSTPONED)) { in drbd_req_complete()
272 m->bio = req->master_bio; in drbd_req_complete()
273 req->master_bio = NULL; in drbd_req_complete()
278 req->i.completed = true; in drbd_req_complete()
281 if (req->i.waiting) in drbd_req_complete()
288 list_del_init(&req->req_pending_master_completion); in drbd_req_complete()
292 static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) in drbd_req_put_completion_ref() argument
294 struct drbd_device *device = req->device; in drbd_req_put_completion_ref()
295 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); in drbd_req_put_completion_ref()
300 if (!atomic_sub_and_test(put, &req->completion_ref)) in drbd_req_put_completion_ref()
303 drbd_req_complete(req, m); in drbd_req_put_completion_ref()
306 * we need to keep the req object around. */ in drbd_req_put_completion_ref()
307 if (req->rq_state & RQ_LOCAL_ABORTED) in drbd_req_put_completion_ref()
310 if (req->rq_state & RQ_POSTPONED) { in drbd_req_put_completion_ref()
311 /* don't destroy the req object just yet, in drbd_req_put_completion_ref()
313 drbd_restart_request(req); in drbd_req_put_completion_ref()
317 kref_put(&req->kref, drbd_req_destroy); in drbd_req_put_completion_ref()
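
drbd_req_destroy(), drbd_req_complete() and drbd_req_put_completion_ref() above form a two-counter lifecycle: completion_ref counts reasons the master bio cannot be completed yet, while kref counts holders of the request object itself; only the final completion_ref drop completes the bio, and only the final kref drop frees the object. A reduced userspace sketch of that split, with invented names and printf() standing in for bio completion:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct demo_request {
		atomic_int completion_ref;	/* outstanding reasons to delay completion */
		atomic_int kref;		/* object lifetime references */
		void *master_bio;		/* stand-in for the upper-layer bio */
	};

	static void demo_req_destroy(struct demo_request *req)
	{
		/* like drbd_req_destroy(): only the last kref holder frees the object */
		free(req);
	}

	static void demo_kref_put(struct demo_request *req)
	{
		if (atomic_fetch_sub(&req->kref, 1) == 1)
			demo_req_destroy(req);
	}

	static void demo_req_complete(struct demo_request *req)
	{
		/* like drbd_req_complete(): detach and finish the master bio */
		printf("completing master bio %p\n", req->master_bio);
		req->master_bio = NULL;
	}

	void demo_req_put_completion_ref(struct demo_request *req, int put)
	{
		/* like drbd_req_put_completion_ref(): completion happens only when
		 * the last completion reference goes away; the object may survive
		 * longer if other kref holders (e.g. the transfer log) remain */
		if (atomic_fetch_sub(&req->completion_ref, put) != put)
			return;
		demo_req_complete(req);
		demo_kref_put(req);
	}
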
320 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_next() argument
326 connection->req_next = req; in set_if_null_req_next()
329 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_next() argument
332 struct drbd_request *iter = req; in advance_conn_req_next()
335 if (connection->req_next != req) in advance_conn_req_next()
338 req = NULL; in advance_conn_req_next()
343 req = iter; in advance_conn_req_next()
347 connection->req_next = req; in advance_conn_req_next()
350 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_ack_pending() argument
356 connection->req_ack_pending = req; in set_if_null_req_ack_pending()
359 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_ack_pending() argument
362 struct drbd_request *iter = req; in advance_conn_req_ack_pending()
365 if (connection->req_ack_pending != req) in advance_conn_req_ack_pending()
368 req = NULL; in advance_conn_req_ack_pending()
373 req = iter; in advance_conn_req_ack_pending()
377 connection->req_ack_pending = req; in advance_conn_req_ack_pending()
380 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_not_net_done() argument
386 connection->req_not_net_done = req; in set_if_null_req_not_net_done()
389 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_not_net_done() argument
392 struct drbd_request *iter = req; in advance_conn_req_not_net_done()
395 if (connection->req_not_net_done != req) in advance_conn_req_not_net_done()
398 req = NULL; in advance_conn_req_not_net_done()
403 req = iter; in advance_conn_req_not_net_done()
407 connection->req_not_net_done = req; in advance_conn_req_not_net_done()
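
The set_if_null_req_* and advance_conn_req_* helpers above cache, per connection, the oldest request still in a given state (next to send, waiting for an ack, not yet done on the network), so timeout checks need not walk the whole transfer log. A simplified sketch of that "remember the first, advance past finished entries" idea, using an array in place of the kernel list and invented names:

	#include <stdbool.h>
	#include <stddef.h>

	struct demo_request {
		bool still_pending;		/* true while in the tracked state */
	};

	struct demo_connection {
		struct demo_request *log[16];	/* stand-in for the transfer log, oldest first */
		size_t log_len;
		struct demo_request *oldest_pending;	/* like connection->req_next */
	};

	/* like set_if_null_req_next(): only the first request entering the state
	 * is remembered; later ones leave the cached pointer untouched */
	void set_if_null_oldest_pending(struct demo_connection *c, struct demo_request *req)
	{
		if (!c->oldest_pending)
			c->oldest_pending = req;
	}

	/* like advance_conn_req_next(): when the cached request leaves the state,
	 * move the pointer forward to the next request still in it, or NULL */
	void advance_oldest_pending(struct demo_connection *c, struct demo_request *req)
	{
		size_t i = 0;

		if (c->oldest_pending != req)
			return;
		while (i < c->log_len && c->log[i] != req)
			i++;
		c->oldest_pending = NULL;
		for (i++; i < c->log_len; i++) {
			if (c->log[i]->still_pending) {
				c->oldest_pending = c->log[i];
				break;
			}
		}
	}
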
411 * req->completion_ref and req->kref. */
412 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, in mod_rq_state() argument
415 struct drbd_device *device = req->device; in mod_rq_state()
417 unsigned s = req->rq_state; in mod_rq_state()
425 req->rq_state &= ~clear; in mod_rq_state()
426 req->rq_state |= set; in mod_rq_state()
429 if (req->rq_state == s) in mod_rq_state()
434 kref_get(&req->kref); in mod_rq_state()
437 atomic_inc(&req->completion_ref); in mod_rq_state()
441 atomic_inc(&req->completion_ref); in mod_rq_state()
445 atomic_inc(&req->completion_ref); in mod_rq_state()
446 set_if_null_req_next(peer_device, req); in mod_rq_state()
450 kref_get(&req->kref); /* wait for the DONE */ in mod_rq_state()
455 atomic_add(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
456 set_if_null_req_not_net_done(peer_device, req); in mod_rq_state()
458 if (req->rq_state & RQ_NET_PENDING) in mod_rq_state()
459 set_if_null_req_ack_pending(peer_device, req); in mod_rq_state()
463 atomic_inc(&req->completion_ref); in mod_rq_state()
471 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); in mod_rq_state()
476 if (req->rq_state & RQ_LOCAL_ABORTED) in mod_rq_state()
477 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
480 list_del_init(&req->req_pending_local); in mod_rq_state()
486 req->acked_jif = jiffies; in mod_rq_state()
487 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
492 advance_conn_req_next(peer_device, req); in mod_rq_state()
497 atomic_sub(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
499 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
500 req->net_done_jif = jiffies; in mod_rq_state()
505 advance_conn_req_next(peer_device, req); in mod_rq_state()
506 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
507 advance_conn_req_not_net_done(peer_device, req); in mod_rq_state()
513 if (req->i.waiting) in mod_rq_state()
516 drbd_req_put_completion_ref(req, m, c_put); in mod_rq_state()
517 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
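
mod_rq_state() above is the single place that changes req->rq_state: it applies a (clear, set) bit pair and then converts the difference between old and new state into reference gets and puts, so every I/O still in flight keeps the request pinned. A stripped-down sketch of that "diff the bits, then adjust the counters" shape, with made-up flag names and only two tracked bits (the real function also manages req->kref, the per-connection pointers and ap_in_flight accounting):

	#include <stdatomic.h>

	#define DEMO_LOCAL_PENDING	(1u << 0)	/* local disk I/O in flight */
	#define DEMO_NET_PENDING	(1u << 1)	/* peer has not acknowledged yet */

	struct demo_request {
		unsigned int rq_state;
		atomic_int completion_ref;
	};

	/* like mod_rq_state(): clear and set bits, then take a completion
	 * reference for every pending bit that appeared and drop one for
	 * every pending bit that went away */
	void demo_mod_rq_state(struct demo_request *req, unsigned int clear, unsigned int set)
	{
		unsigned int old = req->rq_state;
		unsigned int new_state;
		int c_get = 0, c_put = 0;

		req->rq_state &= ~clear;
		req->rq_state |= set;
		new_state = req->rq_state;
		if (new_state == old)
			return;

		if (!(old & DEMO_LOCAL_PENDING) && (new_state & DEMO_LOCAL_PENDING))
			c_get++;
		if (!(old & DEMO_NET_PENDING) && (new_state & DEMO_NET_PENDING))
			c_get++;
		if ((old & DEMO_LOCAL_PENDING) && !(new_state & DEMO_LOCAL_PENDING))
			c_put++;
		if ((old & DEMO_NET_PENDING) && !(new_state & DEMO_NET_PENDING))
			c_put++;

		if (c_get)
			atomic_fetch_add(&req->completion_ref, c_get);
		if (c_put)
			atomic_fetch_sub(&req->completion_ref, c_put);
	}
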
520 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) in drbd_report_io_error() argument
526 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", in drbd_report_io_error()
527 (unsigned long long)req->i.sector, in drbd_report_io_error()
528 req->i.size >> 9, in drbd_report_io_error()
538 static inline bool is_pending_write_protocol_A(struct drbd_request *req) in is_pending_write_protocol_A() argument
540 return (req->rq_state & in is_pending_write_protocol_A()
560 int __req_mod(struct drbd_request *req, enum drbd_req_event what, in __req_mod() argument
564 struct drbd_device *const device = req->device; in __req_mod()
586 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); in __req_mod()
591 req->rq_state |= in __req_mod()
594 mod_rq_state(req, m, 0, RQ_NET_PENDING); in __req_mod()
599 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); in __req_mod()
600 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); in __req_mod()
604 if (req->rq_state & RQ_WRITE) in __req_mod()
605 device->writ_cnt += req->i.size >> 9; in __req_mod()
607 device->read_cnt += req->i.size >> 9; in __req_mod()
609 mod_rq_state(req, m, RQ_LOCAL_PENDING, in __req_mod()
614 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); in __req_mod()
618 drbd_report_io_error(device, req); in __req_mod()
620 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
624 drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size); in __req_mod()
625 drbd_report_io_error(device, req); in __req_mod()
630 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
637 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
651 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
652 drbd_insert_interval(&device->read_requests, &req->i); in __req_mod()
656 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
657 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); in __req_mod()
658 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
659 req->w.cb = w_send_read_req; in __req_mod()
661 &req->w); in __req_mod()
670 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
671 drbd_insert_interval(&device->write_requests, &req->i); in __req_mod()
674 * In case the req ended up on the transfer log before being in __req_mod()
680 * _req_add_to_epoch(req); this has to be after the in __req_mod()
681 * _maybe_start_new_epoch(req); which happened in in __req_mod()
685 * Add req to the (now) current epoch (barrier). */ in __req_mod()
693 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
694 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); in __req_mod()
695 req->w.cb = w_send_dblock; in __req_mod()
697 &req->w); in __req_mod()
710 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
711 req->w.cb = w_send_out_of_sync; in __req_mod()
713 &req->w); in __req_mod()
721 mod_rq_state(req, m, RQ_NET_QUEUED, 0); in __req_mod()
726 if (is_pending_write_protocol_A(req)) in __req_mod()
729 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, in __req_mod()
732 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); in __req_mod()
741 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); in __req_mod()
746 mod_rq_state(req, m, in __req_mod()
759 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
760 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
761 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); in __req_mod()
765 req->rq_state |= RQ_NET_SIS; in __req_mod()
777 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); in __req_mod()
782 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); in __req_mod()
786 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
791 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
792 req->rq_state |= RQ_POSTPONED; in __req_mod()
793 if (req->i.waiting) in __req_mod()
801 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); in __req_mod()
805 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
807 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
811 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
814 mod_rq_state(req, m, in __req_mod()
819 if (bio_data_dir(req->master_bio) == WRITE) in __req_mod()
823 req->w.cb = w_restart_disk_io; in __req_mod()
825 &req->w); in __req_mod()
830 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { in __req_mod()
831 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
840 if (!(req->rq_state & RQ_NET_OK)) { in __req_mod()
841 /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync? in __req_mod()
844 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); in __req_mod()
845 if (req->w.cb) { in __req_mod()
848 &req->w); in __req_mod()
849 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; in __req_mod()
857 if (!(req->rq_state & RQ_WRITE)) in __req_mod()
860 if (req->rq_state & RQ_NET_PENDING) { in __req_mod()
870 mod_rq_state(req, m, RQ_COMPLETION_SUSP, in __req_mod()
871 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); in __req_mod()
875 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
876 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
881 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
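
__req_mod() above is the event dispatcher: every drbd_req_event (TO_BE_SENT, HANDED_OVER_TO_NETWORK, write acks, connection loss, ...) is translated into one mod_rq_state() clear/set pair, plus queueing of sender work where needed. A compact sketch of that switch-on-event shape with an invented three-event enum; the clear/set pairs are loosely modelled on the transitions visible above, not a faithful copy of DRBD's table:

	#include <stdio.h>

	#define DEMO_NET_PENDING	(1u << 0)
	#define DEMO_NET_QUEUED		(1u << 1)
	#define DEMO_NET_SENT		(1u << 2)
	#define DEMO_NET_OK		(1u << 3)

	enum demo_event {
		DEMO_TO_BE_SENT,
		DEMO_HANDED_OVER_TO_NETWORK,
		DEMO_WRITE_ACKED_BY_PEER,
	};

	struct demo_transition {
		unsigned int clear;
		unsigned int set;
	};

	/* like the switch in __req_mod(): map an event to one state transition */
	struct demo_transition demo_req_mod(enum demo_event what)
	{
		switch (what) {
		case DEMO_TO_BE_SENT:
			return (struct demo_transition){ 0, DEMO_NET_PENDING };
		case DEMO_HANDED_OVER_TO_NETWORK:
			return (struct demo_transition){ DEMO_NET_QUEUED, DEMO_NET_SENT };
		case DEMO_WRITE_ACKED_BY_PEER:
			return (struct demo_transition){ DEMO_NET_PENDING, DEMO_NET_OK };
		}
		return (struct demo_transition){ 0, 0 };
	}

	int main(void)
	{
		struct demo_transition t = demo_req_mod(DEMO_WRITE_ACKED_BY_PEER);

		printf("clear 0x%x, set 0x%x\n", t.clear, t.set);
		return 0;
	}
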
953 static void complete_conflicting_writes(struct drbd_request *req) in complete_conflicting_writes() argument
956 struct drbd_device *device = req->device; in complete_conflicting_writes()
958 sector_t sector = req->i.sector; in complete_conflicting_writes()
959 int size = req->i.size; in complete_conflicting_writes()
1032 /* If this returns false, and req->private_bio is still set,
1035 * If it returns false, but req->private_bio is not set,
1038 * Otherwise, this destroys req->private_bio, if any,
1041 static bool do_remote_read(struct drbd_request *req) in do_remote_read() argument
1043 struct drbd_device *device = req->device; in do_remote_read()
1046 if (req->private_bio) { in do_remote_read()
1048 req->i.sector, req->i.size)) { in do_remote_read()
1049 bio_put(req->private_bio); in do_remote_read()
1050 req->private_bio = NULL; in do_remote_read()
1058 if (req->private_bio == NULL) in do_remote_read()
1068 if (rbm == RB_PREFER_LOCAL && req->private_bio) in do_remote_read()
1071 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { in do_remote_read()
1072 if (req->private_bio) { in do_remote_read()
1073 bio_put(req->private_bio); in do_remote_read()
1074 req->private_bio = NULL; in do_remote_read()
1104 static int drbd_process_write_request(struct drbd_request *req) in drbd_process_write_request() argument
1106 struct drbd_device *device = req->device; in drbd_process_write_request()
1119 if (unlikely(req->i.size == 0)) { in drbd_process_write_request()
1121 D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH); in drbd_process_write_request()
1123 _req_mod(req, QUEUE_AS_DRBD_BARRIER, peer_device); in drbd_process_write_request()
1133 _req_mod(req, TO_BE_SENT, peer_device); in drbd_process_write_request()
1134 _req_mod(req, QUEUE_FOR_NET_WRITE, peer_device); in drbd_process_write_request()
1135 } else if (drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size)) in drbd_process_write_request()
1136 _req_mod(req, QUEUE_FOR_SEND_OOS, peer_device); in drbd_process_write_request()
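
drbd_process_write_request() above decides whether a write is replicated right now (TO_BE_SENT followed by QUEUE_FOR_NET_WRITE), only recorded as out-of-sync for a later resync (QUEUE_FOR_SEND_OOS), or treated as an empty flush (QUEUE_AS_DRBD_BARRIER). A small sketch of that branch; peer_takes_writes() and note_out_of_sync() are hypothetical stand-ins for the connection-state check and the resync bitmap update:

	#include <stdbool.h>

	enum demo_action {
		DEMO_QUEUE_BARRIER,	/* size == 0: flush only */
		DEMO_SEND_WRITE,	/* TO_BE_SENT + QUEUE_FOR_NET_WRITE */
		DEMO_SEND_OOS,		/* QUEUE_FOR_SEND_OOS */
		DEMO_LOCAL_ONLY,
	};

	/* hypothetical stand-ins, not DRBD functions */
	static bool peer_takes_writes(void)
	{
		return true;	/* placeholder: pretend the peer can take the write */
	}

	static bool note_out_of_sync(unsigned long long sector, unsigned int size)
	{
		(void)sector; (void)size;
		return true;	/* placeholder: a bit was newly set in the resync bitmap */
	}

	enum demo_action demo_process_write(unsigned long long sector, unsigned int size)
	{
		if (size == 0)
			return DEMO_QUEUE_BARRIER;
		if (peer_takes_writes())
			return DEMO_SEND_WRITE;
		if (note_out_of_sync(sector, size))
			return DEMO_SEND_OOS;
		return DEMO_LOCAL_ONLY;
	}
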
1141 static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags) in drbd_process_discard_or_zeroes_req() argument
1143 int err = drbd_issue_discard_or_zero_out(req->device, in drbd_process_discard_or_zeroes_req()
1144 req->i.sector, req->i.size >> 9, flags); in drbd_process_discard_or_zeroes_req()
1146 req->private_bio->bi_status = BLK_STS_IOERR; in drbd_process_discard_or_zeroes_req()
1147 bio_endio(req->private_bio); in drbd_process_discard_or_zeroes_req()
1151 drbd_submit_req_private_bio(struct drbd_request *req) in drbd_submit_req_private_bio() argument
1153 struct drbd_device *device = req->device; in drbd_submit_req_private_bio()
1154 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio()
1173 drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT | in drbd_submit_req_private_bio()
1176 drbd_process_discard_or_zeroes_req(req, EE_TRIM); in drbd_submit_req_private_bio()
1184 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) in drbd_queue_write() argument
1187 list_add_tail(&req->tl_requests, &device->submit.writes); in drbd_queue_write()
1188 list_add_tail(&req->req_pending_master_completion, in drbd_queue_write()
1205 struct drbd_request *req; in drbd_request_prepare() local
1208 req = drbd_req_new(device, bio); in drbd_request_prepare()
1209 if (!req) { in drbd_request_prepare()
1213 drbd_err(device, "could not kmalloc() req\n"); in drbd_request_prepare()
1220 req->start_jif = bio_start_io_acct(req->master_bio); in drbd_request_prepare()
1223 req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, in drbd_request_prepare()
1226 req->private_bio->bi_private = req; in drbd_request_prepare()
1227 req->private_bio->bi_end_io = drbd_request_endio; in drbd_request_prepare()
1235 if (rw == WRITE && req->private_bio && req->i.size in drbd_request_prepare()
1237 if (!drbd_al_begin_io_fastpath(device, &req->i)) in drbd_request_prepare()
1239 req->rq_state |= RQ_IN_ACT_LOG; in drbd_request_prepare()
1240 req->in_actlog_jif = jiffies; in drbd_request_prepare()
1242 return req; in drbd_request_prepare()
1246 drbd_queue_write(device, req); in drbd_request_prepare()
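
drbd_request_prepare() above allocates the request, starts block I/O accounting, clones the incoming bio into req->private_bio for the local backing device, and then tries the activity-log fast path for writes; if the extent is not hot it hands the request to the submitter thread (drbd_queue_write) and returns NULL. A condensed sketch of that ordering; activity_log_fastpath() and queue_to_submitter() are hypothetical stand-ins:

	#include <stdbool.h>
	#include <stdlib.h>

	struct demo_request {
		unsigned long long sector;
		unsigned int size;
		bool is_write;
		bool in_act_log;	/* like RQ_IN_ACT_LOG */
		void *private_bio;	/* clone aimed at the local backing device */
	};

	/* hypothetical stand-ins, not DRBD functions */
	static bool activity_log_fastpath(unsigned long long sector, unsigned int size)
	{
		(void)sector; (void)size;
		return true;	/* placeholder: the activity-log extent is already hot */
	}

	static void queue_to_submitter(struct demo_request *req)
	{
		(void)req;	/* placeholder for drbd_queue_write() */
	}

	struct demo_request *demo_request_prepare(void *bio, unsigned long long sector,
						  unsigned int size, bool is_write)
	{
		struct demo_request *req = calloc(1, sizeof(*req));

		if (!req)
			return NULL;	/* the real code fails the bio here */
		req->sector = sector;
		req->size = size;
		req->is_write = is_write;
		req->private_bio = bio;	/* the real code clones the bio for the local disk */

		if (is_write && size != 0) {
			if (!activity_log_fastpath(sector, size)) {
				/* slow path: an activity-log transaction is needed */
				queue_to_submitter(req);
				return NULL;	/* caller must not touch req any more */
			}
			req->in_act_log = true;
		}
		return req;
	}
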
1276 struct drbd_request *req = plug->most_recent_req; in drbd_unplug() local
1279 if (!req) in drbd_unplug()
1285 req->rq_state |= RQ_UNPLUG; in drbd_unplug()
1287 drbd_queue_unplug(req->device); in drbd_unplug()
1288 kref_put(&req->kref, drbd_req_destroy); in drbd_unplug()
1306 static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req) in drbd_update_plug() argument
1311 kref_get(&req->kref); in drbd_update_plug()
1312 plug->most_recent_req = req; in drbd_update_plug()
1317 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) in drbd_send_and_submit() argument
1321 const int rw = bio_data_dir(req->master_bio); in drbd_send_and_submit()
1331 complete_conflicting_writes(req); in drbd_send_and_submit()
1342 req->rq_state |= RQ_POSTPONED; in drbd_send_and_submit()
1343 if (req->private_bio) { in drbd_send_and_submit()
1344 bio_put(req->private_bio); in drbd_send_and_submit()
1345 req->private_bio = NULL; in drbd_send_and_submit()
1352 * We must do this before req is registered on any lists. in drbd_send_and_submit()
1355 if (!do_remote_read(req) && !req->private_bio) in drbd_send_and_submit()
1360 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); in drbd_send_and_submit()
1364 if (likely(req->i.size!=0)) { in drbd_send_and_submit()
1368 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); in drbd_send_and_submit()
1372 if (req->private_bio && !may_do_writes(device)) { in drbd_send_and_submit()
1373 bio_put(req->private_bio); in drbd_send_and_submit()
1374 req->private_bio = NULL; in drbd_send_and_submit()
1378 if (!drbd_process_write_request(req)) in drbd_send_and_submit()
1383 if (req->private_bio == NULL) { in drbd_send_and_submit()
1384 _req_mod(req, TO_BE_SENT, peer_device); in drbd_send_and_submit()
1385 _req_mod(req, QUEUE_FOR_NET_READ, peer_device); in drbd_send_and_submit()
1393 drbd_update_plug(plug, req); in drbd_send_and_submit()
1398 if (list_empty(&req->req_pending_master_completion)) in drbd_send_and_submit()
1399 list_add_tail(&req->req_pending_master_completion, in drbd_send_and_submit()
1401 if (req->private_bio) { in drbd_send_and_submit()
1403 req->pre_submit_jif = jiffies; in drbd_send_and_submit()
1404 list_add_tail(&req->req_pending_local, in drbd_send_and_submit()
1406 _req_mod(req, TO_BE_SUBMITTED, NULL); in drbd_send_and_submit()
1413 (unsigned long long)req->i.sector, req->i.size >> 9); in drbd_send_and_submit()
1419 drbd_req_put_completion_ref(req, &m, 1); in drbd_send_and_submit()
1426 * (e.g. remote read), req may already be invalid now. in drbd_send_and_submit()
1427 * That's why we cannot check on req->private_bio. */ in drbd_send_and_submit()
1429 drbd_submit_req_private_bio(req); in drbd_send_and_submit()
1436 struct drbd_request *req = drbd_request_prepare(device, bio); in __drbd_make_request() local
1437 if (IS_ERR_OR_NULL(req)) in __drbd_make_request()
1439 drbd_send_and_submit(device, req); in __drbd_make_request()
1445 struct drbd_request *req, *tmp; in submit_fast_path() local
1448 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in submit_fast_path()
1449 const int rw = bio_data_dir(req->master_bio); in submit_fast_path()
1452 && req->private_bio && req->i.size in submit_fast_path()
1454 if (!drbd_al_begin_io_fastpath(device, &req->i)) in submit_fast_path()
1457 req->rq_state |= RQ_IN_ACT_LOG; in submit_fast_path()
1458 req->in_actlog_jif = jiffies; in submit_fast_path()
1462 list_del_init(&req->tl_requests); in submit_fast_path()
1463 drbd_send_and_submit(device, req); in submit_fast_path()
1473 struct drbd_request *req; in prepare_al_transaction_nonblock() local
1478 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { in prepare_al_transaction_nonblock()
1479 err = drbd_al_begin_io_nonblock(device, &req->i); in prepare_al_transaction_nonblock()
1485 list_move_tail(&req->tl_requests, later); in prepare_al_transaction_nonblock()
1487 list_move_tail(&req->tl_requests, pending); in prepare_al_transaction_nonblock()
1498 struct drbd_request *req; in send_and_submit_pending() local
1501 while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) { in send_and_submit_pending()
1502 req->rq_state |= RQ_IN_ACT_LOG; in send_and_submit_pending()
1503 req->in_actlog_jif = jiffies; in send_and_submit_pending()
1505 list_del_init(&req->tl_requests); in send_and_submit_pending()
1506 drbd_send_and_submit(device, req); in send_and_submit_pending()
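
submit_fast_path(), prepare_al_transaction_nonblock() and send_and_submit_pending() above implement the submitter thread's batching: incoming writes whose activity-log extents are already hot are submitted directly, the rest are collected until an activity-log transaction can cover them, then marked RQ_IN_ACT_LOG and submitted. A simplified sketch of that list splitting, with a singly linked list instead of the kernel's list_head and al_begin_nonblock()/submit_one() as hypothetical stand-ins:

	#include <stdbool.h>
	#include <stddef.h>

	struct demo_request {
		struct demo_request *next;
		bool in_act_log;	/* like RQ_IN_ACT_LOG */
	};

	/* hypothetical stand-ins, not DRBD functions */
	static bool al_begin_nonblock(struct demo_request *req)
	{
		(void)req;
		return true;	/* placeholder for drbd_al_begin_io_nonblock() */
	}

	static void submit_one(struct demo_request *req)
	{
		(void)req;	/* placeholder for drbd_send_and_submit() */
	}

	/* split "incoming" into requests that can be submitted now and requests
	 * that must wait for the next activity-log transaction; returns the
	 * deferred list, which the caller retries on its next pass */
	struct demo_request *demo_submit_batch(struct demo_request *incoming)
	{
		struct demo_request *later = NULL;

		while (incoming) {
			struct demo_request *req = incoming;

			incoming = req->next;
			if (!al_begin_nonblock(req)) {
				req->next = later;	/* like list_move_tail(..., later) */
				later = req;
				continue;
			}
			req->in_act_log = true;	/* like req->rq_state |= RQ_IN_ACT_LOG */
			req->next = NULL;
			submit_one(req);
		}
		return later;
	}
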