Lines matching "total" and "timeout" in net/ipv4/inetpeer.c

From the file's header comment:

     *	INETPEER - A storage for permanent information about peers
    ...
     * We keep one entry for each peer IP address. The nodes contain long-living
     * information about the peer which doesn't depend on routes.
    ...
     * A node may be removed when a sufficient amount of
     * time has passed since its last use. The least-recently-used entry can
     * also be removed if the pool is overloaded, i.e. if the total number of
     * entries is greater than or equal to the threshold.
In inet_peer_base_init():

    bp->rb_root = RB_ROOT;
    seqlock_init(&bp->lock);
    bp->total = 0;
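
The three fields initialised above make up the whole per-base state that the rest of these fragments manipulate. For reference, a sketch of the definition (following include/net/inetpeer.h in recent kernels; check your own tree):

    struct inet_peer_base {
            struct rb_root  rb_root;  /* RB tree of struct inet_peer nodes */
            seqlock_t       lock;     /* writers take it; lockless readers retry on seq change */
            int             total;    /* entry count, compared against the GC threshold */
    };
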
/* Called with rcu_read_lock() or base->lock held */

In lookup():

    pp = &base->rb_root.rb_node;
    ...
    cmp = inetpeer_addr_cmp(daddr, &p->daddr);
    if (cmp == 0) {
            if (!refcount_inc_not_zero(&p->refcnt))
    ...
    } else if (unlikely(read_seqretry(&base->lock, seq))) {
    ...
    if (cmp == -1)
            pp = &next->rb_left;
    else
            pp = &next->rb_right;
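
Assembled, these matched lines form the tree descent. A simplified sketch of the loop (the gc_stack bookkeeping is omitted; names match the fragments above):

    /* Sketch: RCU-safe descent of base->rb_root. On an exact match, try
     * to take a reference; refcount_inc_not_zero() fails if the entry is
     * already being freed. A lockless reader also re-checks the seqlock
     * and bails out (to retry under the lock) if a writer modified the
     * tree during the walk. */
    pp = &base->rb_root.rb_node;
    while ((next = rcu_dereference(*pp)) != NULL) {
            p = rb_entry(next, struct inet_peer, rb_node);
            cmp = inetpeer_addr_cmp(daddr, &p->daddr);
            if (cmp == 0) {
                    if (!refcount_inc_not_zero(&p->refcnt))
                            break;          /* dying entry: treat as not found */
                    return p;               /* stable reference taken */
            }
            if (unlikely(read_seqretry(&base->lock, seq)))
                    break;                  /* writer raced us: caller retries */
            pp = (cmp == -1) ? &next->rb_left : &next->rb_right;
    }
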
In inet_peer_gc():

    if (base->total >= peer_threshold)
            ttl = 0; /* be aggressive */
    else
            ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
                    base->total / peer_threshold * HZ;
    ...
    delta = (__u32)jiffies - READ_ONCE(p->dtime);
    if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
    ...
    rb_erase(&p->rb_node, &base->rb_root);
    base->total--;
    call_rcu(&p->rcu, inetpeer_free_rcu);
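
The two-line expression scales the TTL linearly from peer_maxttl (empty pool) down to peer_minttl (pool near threshold); dividing by HZ before multiplying by base->total presumably keeps the intermediate product within 32 bits. A worked run with the historical defaults (an assumption: minttl = 120*HZ, maxttl = 600*HZ, threshold = 65536 + 128, HZ taken as 1000):

    #include <stdio.h>

    #define HZ 1000

    int main(void)
    {
            const int peer_minttl = 120 * HZ;       /* 2 min under load */
            const int peer_maxttl = 600 * HZ;       /* 10 min when idle */
            const int peer_threshold = 65536 + 128;

            for (int total = 0; total < peer_threshold; total += peer_threshold / 4) {
                    unsigned int ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
                                       total / peer_threshold * HZ;
                    printf("total=%5d -> ttl=%6u jiffies (%3us)\n",
                           total, ttl, ttl / HZ);
            }
            return 0;
    }

It prints 600 s, 480 s, 360 s and 240 s; just below the threshold the TTL bottoms out around peer_minttl, and at or above it the "be aggressive" branch forces ttl = 0.
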
In inet_getpeer():

    seq = read_seqbegin(&base->lock);
    ...
    invalidated = read_seqretry(&base->lock, seq);
    ...
    write_seqlock_bh(&base->lock);
    ...
    p->daddr = *daddr;
    p->dtime = (__u32)jiffies;
    refcount_set(&p->refcnt, 2);
    atomic_set(&p->rid, 0);
    p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
    p->rate_tokens = 0;
    p->n_redirects = 0;
    ...
    p->rate_last = jiffies - 60*HZ;
    ...
    rb_link_node(&p->rb_node, parent, pp);
    rb_insert_color(&p->rb_node, &base->rb_root);
    base->total++;
    ...
    write_sequnlock_bh(&base->lock);
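
These fragments are two phases of one lookup: a lockless attempt first, then a locked retry that may create the node. Note that refcount_set(&p->refcnt, 2) accounts for one reference held by the tree and one handed to the caller, and backdating rate_last by 60 s lets a fresh peer pass its first burst of rate-limited packets. Schematically (a sketch of the control flow, not the verbatim function):

    /* Phase 1: lockless lookup under RCU; note whether a writer
     * touched the tree while we walked it. */
    rcu_read_lock();
    seq = read_seqbegin(&base->lock);
    p = lookup(daddr, base, seq, ...);
    invalidated = read_seqretry(&base->lock, seq);
    rcu_read_unlock();
    if (p)
            return p;

    /* Phase 2: on a miss (or a detected race), repeat the lookup with
     * the write seqlock held; if the entry is still absent, allocate
     * one, fill the fields shown above, and link it into the tree. */
    write_seqlock_bh(&base->lock);
    p = lookup(daddr, base, seq, ...);
    if (!p) {
            /* kmem_cache_alloc(), field init, rb_link_node(),
             * rb_insert_color(), base->total++ */
    }
    write_sequnlock_bh(&base->lock);
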
In inet_putpeer():

    WRITE_ONCE(p->dtime, (__u32)jiffies);

    if (refcount_dec_and_test(&p->refcnt))
        call_rcu(&p->rcu, inetpeer_free_rcu);
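
A typical caller pairs the two (a sketch; net, skb and vif stand in for caller context, and inet_getpeer_v4()'s exact signature, including the create flag, has varied across kernel versions):

    struct inet_peer *peer;

    /* look up, creating the entry on a miss in this sketch */
    peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
    if (peer) {
            /* consult or update the long-lived per-peer state,
             * e.g. peer->rate_tokens, peer->n_redirects, peer->metrics */
            inet_putpeer(peer);     /* stamps dtime, drops the caller's reference */
    }
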
From the comment above inet_peer_xrlim_allow():

    ...
     * for one "ip object" is shared - and these ICMPs are twice limited:
    ...

In inet_peer_xrlim_allow() (signature, then the token-bucket body):

    bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
    ...
    token = peer->rate_tokens;
    now = jiffies;
    token += now - peer->rate_last;
    peer->rate_last = now;
    if (token > XRLIM_BURST_FACTOR * timeout)
            token = XRLIM_BURST_FACTOR * timeout;
    if (token >= timeout) {
            token -= timeout;
            rc = true;
    }
    peer->rate_tokens = token;
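
The effect of the inet_getpeer() initialisation plus this filter: a new peer may burst XRLIM_BURST_FACTOR packets, then gets one per timeout interval. A userspace replay of the same arithmetic (assumptions: HZ = 1000, timeout = 1*HZ, and XRLIM_BURST_FACTOR = 6 as in inetpeer.c):

    #include <stdbool.h>
    #include <stdio.h>

    #define HZ                  1000
    #define XRLIM_BURST_FACTOR  6

    static unsigned long rate_tokens;
    static unsigned long rate_last;

    /* same token-bucket arithmetic as inet_peer_xrlim_allow() above */
    static bool xrlim_allow(unsigned long now, unsigned long timeout)
    {
            unsigned long token = rate_tokens + (now - rate_last);
            bool rc = false;

            rate_last = now;
            if (token > XRLIM_BURST_FACTOR * timeout)
                    token = XRLIM_BURST_FACTOR * timeout;   /* cap the burst */
            if (token >= timeout) {
                    token -= timeout;       /* spend one packet's worth */
                    rc = true;
            }
            rate_tokens = token;
            return rc;
    }

    int main(void)
    {
            unsigned long now = 1000000;

            rate_tokens = 0;
            rate_last = now - 60 * HZ;      /* as inet_getpeer() initialises it */

            for (int i = 1; i <= 7; i++)    /* six pass, the seventh is dropped */
                    printf("packet %d: %s\n", i, xrlim_allow(now, HZ) ? "ok" : "drop");
            printf("1s later: %s\n", xrlim_allow(now + HZ, HZ) ? "ok" : "drop");
            return 0;
    }

The 60 s backdating credits far more than the burst cap, so the bucket starts full: six packets pass back-to-back, then one more token accrues per elapsed timeout.
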
In inetpeer_invalidate_tree():

    struct rb_node *p = rb_first(&base->rb_root);
    ...
    rb_erase(&peer->rb_node, &base->rb_root);
    ...
    base->total = 0;
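
The fragments connect as a walk-and-erase loop; roughly (a sketch, and details such as the cond_resched() may differ by kernel version):

    while (p) {
            struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

            p = rb_next(p);                         /* advance before erasing */
            rb_erase(&peer->rb_node, &base->rb_root);
            inet_putpeer(peer);                     /* drop the tree's reference */
            cond_resched();
    }
    base->total = 0;
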