Lines matching "full:peer" in the rxrpc peer management code

In rxrpc_peer_hash_key():
  28	 * Hash a peer key.
  62	/* Step through the peer address in 16-bit portions for speed */
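
rxrpc_peer_hash_key() folds the transport address into the hash in 16-bit chunks rather than byte by byte. Below is a minimal userspace sketch of that idea; the mixing function, example address and port are illustrative assumptions, not the kernel's actual hash.

/*
 * Sketch only: fold an address into a hash in 16-bit steps.  Not the
 * kernel's rxrpc_peer_hash_key() implementation.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static unsigned long hash_addr_16bit(const void *addr, size_t len,
				     uint16_t port, uint16_t family)
{
	const uint8_t *bytes = addr;
	unsigned long hash = family;
	size_t i;

	hash += port;
	/* Step through the address in 16-bit portions for speed. */
	for (i = 0; i + sizeof(uint16_t) <= len; i += sizeof(uint16_t)) {
		uint16_t chunk;

		memcpy(&chunk, bytes + i, sizeof(chunk));
		hash = hash * 31 + chunk;
	}
	return hash;
}

int main(void)
{
	const uint8_t ipv4[4] = { 192, 0, 2, 1 };	/* documentation address */

	printf("hash=%#lx\n",
	       hash_addr_16bit(ipv4, sizeof(ipv4), 7001, 2 /* AF_INET */));
	return 0;
}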

In rxrpc_peer_cmp_key():
  71	 * Compare a peer to a key.  Return -ve, 0 or +ve to indicate less than, same
  78	static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
  85		diff = ((peer->hash_key - hash_key) ?:
  86			((unsigned long)peer->local - (unsigned long)local) ?:
  87			(peer->srx.transport_type - srx->transport_type) ?:
  88			(peer->srx.transport_len - srx->transport_len) ?:
  89			(peer->srx.transport.family - srx->transport.family));
  95			return ((u16 __force)peer->srx.transport.sin.sin_port -
  97				memcmp(&peer->srx.transport.sin.sin_addr,
 102			return ((u16 __force)peer->srx.transport.sin6.sin6_port -
 104				memcmp(&peer->srx.transport.sin6.sin6_addr,
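
The comparison chains GNU C's binary "?:" operator: each field subtraction yields 0 on a match, so evaluation falls through to the next field, and the first non-zero difference becomes the -ve/0/+ve result. A small self-contained sketch of the pattern (the struct and its fields are made up; it needs a compiler supporting the GNU "a ?: b" extension, e.g. gcc or clang):

/*
 * Chained "?:" comparison: the first non-zero field difference wins.
 * Illustrative struct; not the kernel's key layout.
 */
#include <stdio.h>

struct key {
	unsigned long hash;
	int type;
	int port;
};

static long key_cmp(const struct key *a, const struct key *b)
{
	return ((long)(a->hash - b->hash) ?:	/* GNU extension: falls through on 0 */
		(long)(a->type - b->type) ?:
		(long)(a->port - b->port));
}

int main(void)
{
	struct key a = { .hash = 42, .type = 1, .port = 7001 };
	struct key b = { .hash = 42, .type = 1, .port = 7000 };

	printf("%ld\n", key_cmp(&a, &b));	/* prints 1: differs at port */
	return 0;
}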

In __rxrpc_lookup_peer_rcu():
 121	struct rxrpc_peer *peer;
 124	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
 125		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
 126		    refcount_read(&peer->ref) > 0)
 127			return peer;
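
The RCU lookup walks the hash bucket and only returns a peer whose reference count is still above zero; an entry that has already dropped to zero is being torn down and must be ignored. A userspace analogue of that shape, using a plain linked bucket and a C11 atomic in place of RCU and refcount_t:

/*
 * Analogue of the lookup loop: match the key, then skip entries whose
 * reference count has already reached zero.  No real RCU here.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct peer {
	struct peer *next;		/* hash-bucket chain */
	unsigned long hash_key;
	atomic_int ref;
};

static struct peer *lookup_peer(struct peer *bucket, unsigned long hash_key)
{
	for (struct peer *p = bucket; p; p = p->next) {
		if (p->hash_key == hash_key &&
		    atomic_load(&p->ref) > 0)
			return p;
	}
	return NULL;
}

int main(void)
{
	struct peer live  = { .next = NULL,  .hash_key = 7, .ref = 1 };
	struct peer dying = { .next = &live, .hash_key = 7, .ref = 0 };

	/* The dying entry (ref == 0) is skipped; the live one is returned. */
	printf("%s\n", lookup_peer(&dying, 7) == &live ? "found live peer" : "miss");
	return 0;
}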

In rxrpc_lookup_peer_rcu():
 139	struct rxrpc_peer *peer;
 142	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 143	if (peer)
 144		_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 145	return peer;

In rxrpc_assess_MTU_size():
 149	 * assess the MTU size for the network interface through which this peer is
 153				   struct rxrpc_peer *peer)
 164	peer->if_mtu = 1500;
 167	switch (peer->srx.transport.family) {
 171			       peer->srx.transport.sin.sin_addr.s_addr, 0,
 185		memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
 201		peer->if_mtu = dst_mtu(dst);
 204	_leave(" [if_mtu %u]", peer->if_mtu);
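
The function starts from a 1500-byte default and, when a route to the peer can be resolved, overrides if_mtu with the route's MTU (dst_mtu()). A rough userspace analogue is to connect a UDP socket to the destination and ask the kernel for IP_MTU on the chosen route; this is only an analogy, not what the kernel code does, and the destination address and port below are placeholders.

/*
 * Userspace analogue: query the MTU the kernel would use toward a peer,
 * falling back to 1500 if the query fails.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_MTU
#define IP_MTU 14	/* from linux/in.h, in case libc headers lack it */
#endif

int main(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(7001),	/* placeholder port */
	};
	int fd, mtu = 1500;			/* default, as in the excerpt above */
	socklen_t len = sizeof(mtu);

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder address */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd >= 0 &&
	    connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0 &&
	    getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("route MTU toward peer: %d\n", mtu);
	else
		printf("falling back to default MTU %d\n", mtu);

	if (fd >= 0)
		close(fd);
	return 0;
}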

In rxrpc_alloc_peer():
 208	 * Allocate a peer.
 213	struct rxrpc_peer *peer;
 217	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
 218	if (peer) {
 219		refcount_set(&peer->ref, 1);
 220		peer->local = rxrpc_get_local(local, rxrpc_local_get_peer);
 221		INIT_HLIST_HEAD(&peer->error_targets);
 222		peer->service_conns = RB_ROOT;
 223		seqlock_init(&peer->service_conn_lock);
 224		spin_lock_init(&peer->lock);
 225		spin_lock_init(&peer->rtt_input_lock);
 226		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
 228		rxrpc_peer_init_rtt(peer);
 230		peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
 231		trace_rxrpc_peer(peer->debug_id, 1, why);
 234	_leave(" = %p", peer);
 235	return peer;
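
Allocation follows the usual kernel object pattern: a zeroed allocation, a reference count that starts at 1 for the caller, a counted reference pinning the parent local endpoint, and a debug id drawn from a global counter for tracing. A userspace sketch of just the allocate-with-initial-ref part, using a stand-in struct rather than the real struct rxrpc_peer:

/*
 * Sketch: zeroed allocation, ref starts at 1, monotonically increasing
 * debug id.  "struct peer" is a stand-in.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint debug_id_counter;

struct peer {
	atomic_int ref;
	unsigned int debug_id;
};

static struct peer *alloc_peer(void)
{
	struct peer *p = calloc(1, sizeof(*p));	/* zeroed, like kzalloc() */

	if (p) {
		atomic_store(&p->ref, 1);	/* the caller owns one reference */
		p->debug_id = atomic_fetch_add(&debug_id_counter, 1) + 1;
	}
	return p;
}

int main(void)
{
	struct peer *p = alloc_peer();

	if (p)
		printf("peer %u allocated with ref=%d\n",
		       p->debug_id, atomic_load(&p->ref));
	free(p);
	return 0;
}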

In rxrpc_init_peer():
 239	 * Initialise peer record.
 241	static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
 244	peer->hash_key = hash_key;
 245	rxrpc_assess_MTU_size(local, peer);
 246	peer->mtu = peer->if_mtu;
 247	peer->rtt_last_req = ktime_get_real();
 249	switch (peer->srx.transport.family) {
 251		peer->hdrsize = sizeof(struct iphdr);
 255		peer->hdrsize = sizeof(struct ipv6hdr);
 262	switch (peer->srx.transport_type) {
 264		peer->hdrsize += sizeof(struct udphdr);
 270	peer->hdrsize += sizeof(struct rxrpc_wire_header);
 271	peer->maxdata = peer->mtu - peer->hdrsize;
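
hdrsize accumulates the network header, the UDP header and the rxrpc wire header, and maxdata is whatever the MTU leaves for payload. A worked example under stated assumptions: a 1500-byte MTU, a 20-byte IPv4 header without options, an 8-byte UDP header and a 28-byte rxrpc wire header (the last figure is an assumption, not a value taken from the kernel headers at build time).

/* Worked example of the maxdata computation under the assumptions above. */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int hdrsize = 0;

	hdrsize += 20;	/* IPv4 header without options */
	hdrsize += 8;	/* UDP header */
	hdrsize += 28;	/* assumed size of the rxrpc wire header */

	printf("maxdata = %u - %u = %u bytes per packet\n",
	       mtu, hdrsize, mtu - hdrsize);	/* 1444 */
	return 0;
}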

In rxrpc_create_peer():
 275	 * Set up a new peer.
 282	struct rxrpc_peer *peer;
 286	peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
 287	if (peer) {
 288		memcpy(&peer->srx, srx, sizeof(*srx));
 289		rxrpc_init_peer(local, peer, hash_key);
 292	_leave(" = %p", peer);
 293	return peer;

In rxrpc_free_peer():
 296	static void rxrpc_free_peer(struct rxrpc_peer *peer)
 298	trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free);
 299	rxrpc_put_local(peer->local, rxrpc_local_put_peer);
 300	kfree_rcu(peer, rcu);

In rxrpc_new_incoming_peer():
 304	 * Set up a new incoming peer.  There shouldn't be any other matching peers
 308	void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 313	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
 314	rxrpc_init_peer(local, peer, hash_key);
 317	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
 318	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);

In rxrpc_lookup_peer():
 328	struct rxrpc_peer *peer, *candidate;
 334	/* search the peer list first */
 336	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 337	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
 338		peer = NULL;
 341	if (!peer) {
 342		/* The peer is not yet present in hash - create a candidate
 354		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 355		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
 356			peer = NULL;
 357		if (!peer) {
 366	if (peer)
 369		peer = candidate;
 372	_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 373	return peer;
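
rxrpc_lookup_peer() uses a double-checked insert: an optimistic lookup, a candidate allocated outside the lock on a miss, a second lookup under the lock, and then either the candidate is inserted or it is discarded in favour of a peer added concurrently. A toy userspace version of the same shape, with a single mutex-protected bucket instead of the RCU hash table:

/*
 * Double-checked insert: look up, allocate a candidate outside the lock,
 * re-check under the lock, insert or discard.  Toy single bucket only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
	struct peer *next;
	unsigned long key;
};

static struct peer *bucket;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static struct peer *lookup(unsigned long key)
{
	for (struct peer *p = bucket; p; p = p->next)
		if (p->key == key)
			return p;
	return NULL;
}

static struct peer *lookup_or_create(unsigned long key)
{
	struct peer *p, *candidate;

	pthread_mutex_lock(&bucket_lock);
	p = lookup(key);			/* optimistic first pass */
	pthread_mutex_unlock(&bucket_lock);
	if (p)
		return p;

	candidate = calloc(1, sizeof(*candidate));	/* allocate outside the lock */
	if (!candidate)
		return NULL;
	candidate->key = key;

	pthread_mutex_lock(&bucket_lock);
	p = lookup(key);			/* somebody may have beaten us */
	if (!p) {
		candidate->next = bucket;	/* we won: publish the candidate */
		bucket = candidate;
		p = candidate;
		candidate = NULL;
	}
	pthread_mutex_unlock(&bucket_lock);

	free(candidate);			/* discard the loser, if any */
	return p;
}

int main(void)
{
	printf("%s\n", lookup_or_create(7) == lookup_or_create(7) ?
	       "same peer returned for the same key" : "bug");
	return 0;
}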

In rxrpc_get_peer():
 377	 * Get a ref on a peer record.
 379	struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 383	__refcount_inc(&peer->ref, &r);
 384	trace_rxrpc_peer(peer->debug_id, r + 1, why);
 385	return peer;

In rxrpc_get_peer_maybe():
 389	 * Get a ref on a peer record unless its usage has already reached 0.
 391	struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer,
 396	if (peer) {
 397		if (__refcount_inc_not_zero(&peer->ref, &r))
 398			trace_rxrpc_peer(peer->debug_id, r + 1, why);
 400			peer = NULL;
 402	return peer;
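
rxrpc_get_peer_maybe() only takes a reference if the count has not already reached zero, which is what makes handing out pointers found under RCU safe. A sketch of that primitive with C11 atomics (a stand-in for the kernel's refcount_t, not its implementation):

/* "Increment unless already zero": never resurrect a dying object. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool ref_inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	do {
		if (old == 0)
			return false;	/* object is dying: do not take a ref */
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %s\n", ref_inc_not_zero(&live) ? "ref taken" : "refused");
	printf("dead: %s\n", ref_inc_not_zero(&dead) ? "ref taken" : "refused");
	return 0;
}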

In __rxrpc_put_peer():
 406	 * Discard a peer record.
 408	static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 410	struct rxrpc_net *rxnet = peer->local->rxnet;
 412	ASSERT(hlist_empty(&peer->error_targets));
 415	hash_del_rcu(&peer->hash_link);
 416	list_del_init(&peer->keepalive_link);
 419	rxrpc_free_peer(peer);

In rxrpc_put_peer():
 423	 * Drop a ref on a peer record.
 425	void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 431	if (peer) {
 432		debug_id = peer->debug_id;
 433		dead = __refcount_dec_and_test(&peer->ref, &r);
 436			__rxrpc_put_peer(peer);
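
The release side mirrors the lookup rules: the reference count is decremented and only the caller that takes it to zero unhashes the peer and frees it, with the kernel deferring the actual free through kfree_rcu() so concurrent RCU lookups never see freed memory. A userspace sketch of the dec-and-test shape, with plain free() standing in for the RCU-deferred free:

/* Drop a reference; only the last owner tears the object down. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
	atomic_int ref;
};

static bool ref_dec_and_test(atomic_int *ref)
{
	return atomic_fetch_sub(ref, 1) == 1;	/* true for the last reference */
}

static void put_peer(struct peer *p)
{
	if (p && ref_dec_and_test(&p->ref)) {
		/* last ref: remove from lookup structures, then free */
		free(p);
	}
}

int main(void)
{
	struct peer *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	atomic_store(&p->ref, 2);	/* two owners */
	put_peer(p);			/* first put: object survives */
	put_peer(p);			/* second put: freed here */
	printf("done\n");
	return 0;
}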

In rxrpc_destroy_all_peers():
 441	 * Make sure all peer records have been discarded.
 445	struct rxrpc_peer *peer;
 452	hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
 453		pr_err("Leaked peer %u {%u} %pISp\n",
 454		       peer->debug_id,
 455		       refcount_read(&peer->ref),
 456		       &peer->srx.transport);

In rxrpc_kernel_get_call_peer():
 462	 * rxrpc_kernel_get_call_peer - Get the peer address of a call
 466	 * Get a record for the remote peer in a call.
 470	return call->peer;

In rxrpc_kernel_get_srtt():
 475	 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
 476	 * @peer: The peer to query
 478	 * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples.
 480	unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer)
 482	return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX;
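
srtt_us is kept in TCP-style fixed point, scaled by a factor of 8, so the accessor shifts right by 3 to report microseconds and returns UINT_MAX when there are no samples yet. A tiny illustration (the stored value is made up):

/* The ">> 3" converts the 8x-scaled smoothed RTT back to microseconds. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int srtt_scaled = 4000;	/* hypothetical stored srtt_us */
	unsigned int rtt_count = 1;

	unsigned int srtt_us = rtt_count > 0 ? srtt_scaled >> 3 : UINT_MAX;

	printf("smoothed RTT: %u us\n", srtt_us);	/* prints 500 */
	return 0;
}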

In rxrpc_kernel_remote_srx():
 487	 * rxrpc_kernel_remote_srx - Get the address of a peer
 488	 * @peer: The peer to query
 490	 * Get a pointer to the address from a peer record.  The caller is responsible
 493	const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer)
 495	return peer ? &peer->srx : &rxrpc_null_addr;

In rxrpc_kernel_remote_addr():
 500	 * rxrpc_kernel_remote_addr - Get the peer transport address of a call
 501	 * @peer: The peer to query
 503	 * Get a pointer to the transport address from a peer record.  The caller is
 506	const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer)
 509		(peer ? &peer->srx.transport : &rxrpc_null_addr.transport);