Lines Matching refs:connection

96 	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);  in drbd_endio_read_sec_final()
107 struct drbd_connection *connection = peer_device->connection; in drbd_endio_write_sec_final() local
149 if (connection->cstate >= C_WF_REPORT_PARAMS) { in drbd_endio_write_sec_final()
151 if (!queue_work(connection->ack_sender, &peer_device->send_acks_work)) in drbd_endio_write_sec_final()
353 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); in w_e_send_csum()
358 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_send_csum()
448 &first_peer_device(device)->connection->sender_work, in resync_timer_fn()
588 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; in make_resync_request() local
616 if (connection->agreed_features & DRBD_FF_THIN_RESYNC) { in make_resync_request()
630 mutex_lock(&connection->data.mutex); in make_resync_request()
631 if (connection->data.socket) { in make_resync_request()
632 struct sock *sk = connection->data.socket->sk; in make_resync_request()
642 mutex_unlock(&connection->data.mutex); in make_resync_request()
844 struct drbd_connection *connection = first_peer_device(device)->connection; in ping_peer() local
846 clear_bit(GOT_PING_ACK, &connection->flags); in ping_peer()
847 request_ping(connection); in ping_peer()
848 wait_event(connection->ping_wait, in ping_peer()
849 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED); in ping_peer()
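The ping_peer() references above follow a flag-and-wait handshake: clear GOT_PING_ACK, request a ping from the peer, then sleep until the ack arrives or the connection state drops below C_CONNECTED. A minimal userspace sketch of that pattern, with hypothetical names (struct conn, got_ping_ack, connected) standing in for the DRBD flag word and wait queue:

#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t lock;
	pthread_cond_t  wake;      /* analogue of connection->ping_wait */
	bool got_ping_ack;         /* analogue of GOT_PING_ACK in connection->flags */
	bool connected;            /* analogue of device->state.conn >= C_CONNECTED */
};

static void ping_peer_sketch(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->got_ping_ack = false;                   /* clear_bit(GOT_PING_ACK, ...) */
	/* request_ping() would queue a P_PING packet to the peer here */
	while (!c->got_ping_ack && c->connected)   /* wait_event() predicate */
		pthread_cond_wait(&c->wake, &c->lock);
	pthread_mutex_unlock(&c->lock);
}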
855 struct drbd_connection *connection = peer_device->connection; in drbd_resync_finished() local
877 drbd_queue_work(&connection->sender_work, &dw->w); in drbd_resync_finished()
1003 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in drbd_resync_finished()
1011 conn_khelper(connection, "unfence-peer"); in drbd_resync_finished()
1194 if (peer_device->connection->csums_tfm) { in w_e_end_csum_rs_req()
1195 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); in w_e_end_csum_rs_req()
1200 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_end_csum_rs_req()
1245 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_req()
1253 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_req()
1318 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_reply()
1321 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_reply()
1367 static int drbd_send_barrier(struct drbd_connection *connection) in drbd_send_barrier() argument
1372 sock = &connection->data; in drbd_send_barrier()
1373 p = conn_prepare_command(connection, sock); in drbd_send_barrier()
1376 p->barrier = connection->send.current_epoch_nr; in drbd_send_barrier()
1378 connection->send.current_epoch_writes = 0; in drbd_send_barrier()
1379 connection->send.last_sent_barrier_jif = jiffies; in drbd_send_barrier()
1381 return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0); in drbd_send_barrier()
1386 struct drbd_socket *sock = &pd->connection->data; in pd_send_unplug_remote()
1402 static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch) in re_init_if_first_write() argument
1404 if (!connection->send.seen_any_write_yet) { in re_init_if_first_write()
1405 connection->send.seen_any_write_yet = true; in re_init_if_first_write()
1406 connection->send.current_epoch_nr = epoch; in re_init_if_first_write()
1407 connection->send.current_epoch_writes = 0; in re_init_if_first_write()
1408 connection->send.last_sent_barrier_jif = jiffies; in re_init_if_first_write()
1412 static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch) in maybe_send_barrier() argument
1415 if (!connection->send.seen_any_write_yet) in maybe_send_barrier()
1417 if (connection->send.current_epoch_nr != epoch) { in maybe_send_barrier()
1418 if (connection->send.current_epoch_writes) in maybe_send_barrier()
1419 drbd_send_barrier(connection); in maybe_send_barrier()
1420 connection->send.current_epoch_nr = epoch; in maybe_send_barrier()
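Taken together, the re_init_if_first_write() and maybe_send_barrier() lines above implement the write-epoch bookkeeping: the first write initializes the current epoch, and a P_BARRIER is only emitted when a request carries a newer epoch number and the old epoch actually contained writes. A condensed sketch of that logic (struct send_state and the send_barrier() stub are simplified assumptions, not the DRBD structures):

#include <stdbool.h>

struct send_state {
	bool seen_any_write_yet;
	unsigned int current_epoch_nr;
	unsigned int current_epoch_writes;
};

static void send_barrier(struct send_state *s)
{
	/* would send P_BARRIER for s->current_epoch_nr, then reset the write count */
	s->current_epoch_writes = 0;
}

static void re_init_if_first_write(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet) {
		s->seen_any_write_yet = true;
		s->current_epoch_nr = epoch;
		s->current_epoch_writes = 0;
	}
}

static void maybe_send_barrier(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet)        /* nothing to separate yet */
		return;
	if (s->current_epoch_nr != epoch) {
		if (s->current_epoch_writes)   /* only fence epochs that saw writes */
			send_barrier(s);
		s->current_epoch_nr = epoch;
	}
}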
1429 struct drbd_connection *const connection = peer_device->connection; in w_send_out_of_sync() local
1442 maybe_send_barrier(connection, req->epoch); in w_send_out_of_sync()
1460 struct drbd_connection *connection = peer_device->connection; in w_send_dblock() local
1470 re_init_if_first_write(connection, req->epoch); in w_send_dblock()
1471 maybe_send_barrier(connection, req->epoch); in w_send_dblock()
1472 connection->send.current_epoch_writes++; in w_send_dblock()
1493 struct drbd_connection *connection = peer_device->connection; in w_send_read_req() local
1505 maybe_send_barrier(connection, req->epoch); in w_send_read_req()
1718 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device) in use_checksum_based_resync() argument
1722 csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only; in use_checksum_based_resync()
1724 return connection->agreed_pro_version >= 89 && /* supported? */ in use_checksum_based_resync()
1725 connection->csums_tfm && /* configured? */ in use_checksum_based_resync()
1741 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; in drbd_start_resync() local
1750 if (!connection) { in drbd_start_resync()
1765 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); in drbd_start_resync()
1778 conn_request_state(connection, in drbd_start_resync()
1786 if (current == connection->worker.task) { in drbd_start_resync()
1864 device->use_csums = use_checksum_based_resync(connection, device); in drbd_start_resync()
1876 if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96) in drbd_start_resync()
1879 if (connection->agreed_pro_version < 95 && device->rs_total == 0) { in drbd_start_resync()
1895 nc = rcu_dereference(connection->net_conf); in drbd_start_resync()
2056 static void do_unqueued_work(struct drbd_connection *connection) in do_unqueued_work() argument
2062 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in do_unqueued_work()
2085 static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list) in wait_for_work() argument
2091 dequeue_work_batch(&connection->sender_work, work_list); in wait_for_work()
2102 nc = rcu_dereference(connection->net_conf); in wait_for_work()
2106 mutex_lock(&connection->data.mutex); in wait_for_work()
2107 if (connection->data.socket) in wait_for_work()
2108 tcp_sock_set_cork(connection->data.socket->sk, false); in wait_for_work()
2109 mutex_unlock(&connection->data.mutex); in wait_for_work()
2114 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); in wait_for_work()
2115 spin_lock_irq(&connection->resource->req_lock); in wait_for_work()
2116 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2117 if (!list_empty(&connection->sender_work.q)) in wait_for_work()
2118 list_splice_tail_init(&connection->sender_work.q, work_list); in wait_for_work()
2119 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2121 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2133 atomic_read(&connection->current_tle_nr) != in wait_for_work()
2134 connection->send.current_epoch_nr; in wait_for_work()
2135 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2138 maybe_send_barrier(connection, in wait_for_work()
2139 connection->send.current_epoch_nr + 1); in wait_for_work()
2141 if (test_bit(DEVICE_WORK_PENDING, &connection->flags)) in wait_for_work()
2145 if (get_t_state(&connection->worker) != RUNNING) in wait_for_work()
2153 finish_wait(&connection->sender_work.q_wait, &wait); in wait_for_work()
2157 nc = rcu_dereference(connection->net_conf); in wait_for_work()
2160 mutex_lock(&connection->data.mutex); in wait_for_work()
2161 if (connection->data.socket) { in wait_for_work()
2163 tcp_sock_set_cork(connection->data.socket->sk, true); in wait_for_work()
2165 tcp_sock_set_cork(connection->data.socket->sk, false); in wait_for_work()
2167 mutex_unlock(&connection->data.mutex); in wait_for_work()
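The wait_for_work() references show the data socket being corked while a batch of sender work is outstanding and uncorked before the worker goes idle, so small protocol packets coalesce into fewer TCP segments. tcp_sock_set_cork() is an in-kernel helper; a hedged userspace sketch of the same idea uses the TCP_CORK socket option (fd, set_cork() and send_batch() below are hypothetical):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void set_cork(int fd, int on)
{
	/* Linux-specific: hold back partial frames while 'on' is set */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
}

static void send_batch(int fd, const struct iovec *iov, int cnt)
{
	set_cork(fd, 1);        /* analogue of tcp_sock_set_cork(sk, true)  */
	writev(fd, iov, cnt);   /* send the whole batch back to back        */
	set_cork(fd, 0);        /* analogue of tcp_sock_set_cork(sk, false) */
}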
2172 struct drbd_connection *connection = thi->connection; in drbd_worker() local
2182 update_worker_timing_details(connection, wait_for_work); in drbd_worker()
2183 wait_for_work(connection, &work_list); in drbd_worker()
2186 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2187 update_worker_timing_details(connection, do_unqueued_work); in drbd_worker()
2188 do_unqueued_work(connection); in drbd_worker()
2194 drbd_warn(connection, "Worker got an unexpected signal\n"); in drbd_worker()
2206 update_worker_timing_details(connection, w->cb); in drbd_worker()
2207 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) in drbd_worker()
2209 if (connection->cstate >= C_WF_REPORT_PARAMS) in drbd_worker()
2210 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); in drbd_worker()
2215 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2216 update_worker_timing_details(connection, do_unqueued_work); in drbd_worker()
2217 do_unqueued_work(connection); in drbd_worker()
2222 update_worker_timing_details(connection, w->cb); in drbd_worker()
2225 dequeue_work_batch(&connection->sender_work, &work_list); in drbd_worker()
2226 } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags)); in drbd_worker()
2229 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in drbd_worker()
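The drbd_worker() references outline the shape of the sender thread: wait for a batch of queued work, handle per-device work flagged via DEVICE_WORK_PENDING, then run each callback with a cancel flag once the connection has fallen below C_WF_REPORT_PARAMS. A compact restatement of that loop, with all types and helpers as simplified stand-ins for the DRBD ones:

#include <stdbool.h>
#include <stddef.h>

struct work {
	struct work *next;
	int (*cb)(struct work *w, bool cancel);    /* returns 0 on success */
};

/* stand-ins for dequeue_work_batch(), DEVICE_WORK_PENDING handling, etc. */
extern struct work *wait_for_work_batch(void);
extern bool device_work_pending(void);
extern void do_unqueued_work(void);
extern bool connection_established(void);       /* cstate >= C_WF_REPORT_PARAMS */
extern bool worker_running(void);
extern void force_network_failure(void);        /* conn_request_state(..., C_NETWORK_FAILURE, ...) */

static void worker_loop(void)
{
	while (worker_running()) {
		struct work *w = wait_for_work_batch();

		if (device_work_pending())
			do_unqueued_work();

		for (; w; w = w->next) {
			/* callbacks are told to cancel once the link is down */
			if (w->cb(w, !connection_established()) != 0 &&
			    connection_established())
				force_network_failure();
		}
	}
}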