Lines matching "incoming" in drivers/block/drbd/drbd_req.c (the DRBD request submission path):

In submit_fast_path():

1442  static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
1448          list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
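The _safe variant on line 1448 is load-bearing: submit_fast_path() unlinks requests from incoming while walking it, which a plain iterator would not survive. Below is a minimal userspace sketch of that pattern, with a toy struct req standing in for struct drbd_request and a stripped-down reimplementation of the list helpers; everything here is illustrative, not the DRBD code (builds with GCC or Clang, which provide __typeof__).

#include <stddef.h>
#include <stdio.h>

/* Stripped-down circular doubly-linked list, same shape as the
 * kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Cache the next node before the loop body runs, so the current
 * entry may be unlinked mid-walk -- the property the fast path
 * relies on. */
#define list_for_each_entry_safe(pos, n, head, member)                        \
	for (pos = container_of((head)->next, __typeof__(*pos), member),      \
	     n = container_of(pos->member.next, __typeof__(*pos), member);    \
	     &pos->member != (head);                                          \
	     pos = n, n = container_of(n->member.next, __typeof__(*n), member))

struct req { int id; struct list_head tl_requests; };

int main(void)
{
	LIST_HEAD(incoming);
	struct req reqs[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct req *req, *tmp;

	for (int i = 0; i < 3; i++)
		list_add_tail(&reqs[i].tl_requests, &incoming);

	list_for_each_entry_safe(req, tmp, &incoming, tl_requests) {
		if (req->id != 2) {	/* stand-in "fast path" check */
			list_del(&req->tl_requests);	/* safe: tmp is cached */
			printf("submitted req %d\n", req->id);
		}
	}
	return 0;
}

In the real function the fast-path condition is, roughly, whether the write's activity-log extents are already hot (drbd_al_begin_io_fastpath()); requests that miss stay on incoming for the activity-log transaction path below.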
In prepare_al_transaction_nonblock():

1469                                        struct list_head *incoming,
1478          while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
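Line 1478 is the classic drain loop: pop the head of incoming, try a non-blocking resource grab, and triage the request onto pending or busy. A sketch of that shape under the same kind of toy list helpers; the al_try_get_nonblock() predicate is a hypothetical stand-in for the non-blocking activity-log attempt, not a DRBD function.

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Unlink from the current list, append to another. */
static void list_move_tail(struct list_head *n, struct list_head *h)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_add_tail(n, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry_or_null(head, type, member) \
	(list_empty(head) ? NULL : container_of((head)->next, type, member))

struct req { int id; struct list_head tl_requests; };

/* Hypothetical stand-in for the non-blocking activity-log attempt:
 * odd ids "fit in the open transaction", even ids would block. */
static bool al_try_get_nonblock(struct req *req) { return req->id & 1; }

static void prepare_nonblock(struct list_head *incoming,
			     struct list_head *pending,
			     struct list_head *busy)
{
	struct req *req;

	/* Same shape as line 1478: drain incoming head-first. */
	while ((req = list_first_entry_or_null(incoming, struct req, tl_requests))) {
		if (al_try_get_nonblock(req))
			list_move_tail(&req->tl_requests, pending);
		else
			list_move_tail(&req->tl_requests, busy);
	}
}

int main(void)
{
	LIST_HEAD(incoming); LIST_HEAD(pending); LIST_HEAD(busy);
	struct req reqs[4] = { { .id = 1 }, { .id = 2 }, { .id = 3 }, { .id = 4 } };
	struct list_head *p;

	for (int i = 0; i < 4; i++)
		list_add_tail(&reqs[i].tl_requests, &incoming);
	prepare_nonblock(&incoming, &pending, &busy);

	for (p = pending.next; p != &pending; p = p->next)
		printf("pending: %d\n", container_of(p, struct req, tl_requests)->id);
	for (p = busy.next; p != &busy; p = p->next)
		printf("busy:    %d\n", container_of(p, struct req, tl_requests)->id);
	return 0;
}

Everything that lands on busy is what do_submit() later splices back to the front of incoming, so stalled requests keep their queue position.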
In do_submit():

1514  LIST_HEAD(incoming);	/* from drbd_make_request() */
1518  /* grab new incoming requests */
1520  list_splice_tail_init(&device->submit.writes, &incoming);
1526  /* move used-to-be-busy back to front of incoming */
1527  list_splice_init(&busy, &incoming);
1528  submit_fast_path(device, &incoming);
1529  if (list_empty(&incoming))
1535  list_splice_init(&busy, &incoming);
1536  prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1543   * incoming requests, we still must not totally starve new
1545   * Something left on &incoming means there had not been
1553  if (!list_empty(&incoming))
1557   * on incoming: all moved to busy!
1560  list_splice_tail_init(&device->submit.writes, &incoming);
1565  /* If the transaction was full, before all incoming requests
1567   * without splicing in more incoming requests from upper layers.
1569   * Else, if all incoming have been processed,
1581  while (list_empty(&incoming)) {
1601          list_splice_tail_init(&more_incoming, &incoming);
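Most of do_submit() is list surgery, and the two splice flavors do different jobs: list_splice_tail_init() appends freshly arrived writes behind whatever is already queued (lines 1520, 1560, 1601), while list_splice_init() puts used-to-be-busy requests back at the front (lines 1527, 1535) so they are retried before newer ones. A self-contained sketch of just that ordering, again with stripped-down reimplementations of the kernel helpers and a toy struct req; this is illustrative, not the DRBD code.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Insert the whole of @list between @prev and @next. */
static void __list_splice(const struct list_head *list,
			  struct list_head *prev, struct list_head *next)
{
	struct list_head *first = list->next, *last = list->prev;
	first->prev = prev; prev->next = first;
	last->next = next;  next->prev = last;
}

/* Move @list to the *front* of @head, leaving @list empty. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/* Move @list to the *tail* of @head, leaving @list empty. */
static void list_splice_tail_init(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct req { int id; struct list_head tl_requests; };

static void dump(const char *tag, struct list_head *h)
{
	printf("%s:", tag);
	for (struct list_head *p = h->next; p != h; p = p->next)
		printf(" %d", container_of(p, struct req, tl_requests)->id);
	printf("\n");
}

int main(void)
{
	LIST_HEAD(incoming); LIST_HEAD(busy); LIST_HEAD(writes);
	struct req reqs[4] = { { .id = 1 }, { .id = 2 }, { .id = 3 }, { .id = 4 } };

	/* 1 and 2 stalled on the activity log in the previous round. */
	list_add_tail(&reqs[0].tl_requests, &busy);
	list_add_tail(&reqs[1].tl_requests, &busy);
	/* 3 and 4 just arrived via device->submit.writes. */
	list_add_tail(&reqs[2].tl_requests, &writes);
	list_add_tail(&reqs[3].tl_requests, &writes);

	/* do_submit() order: fresh writes behind the queue... */
	list_splice_tail_init(&writes, &incoming);
	/* ...used-to-be-busy back to the front, keeping their turn. */
	list_splice_init(&busy, &incoming);
	dump("incoming", &incoming);	/* prints: incoming: 1 2 3 4 */
	return 0;
}

The front-splice is what the comment on line 1526 describes: without it, requests that once failed the non-blocking activity-log attempt would be starved by a steady stream of new writes spliced in at the tail.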