Lines Matching +full:x +full:- +full:max

2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
42 if (c4iw_id_table_alloc(&rdev->resource.qid_table, in c4iw_init_qid_table()
43 rdev->lldi.vr->qp.start, in c4iw_init_qid_table()
44 rdev->lldi.vr->qp.size, in c4iw_init_qid_table()
45 rdev->lldi.vr->qp.size, 0)) in c4iw_init_qid_table()
46 return -ENOMEM; in c4iw_init_qid_table()
48 for (i = rdev->lldi.vr->qp.start; in c4iw_init_qid_table()
49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) in c4iw_init_qid_table()
50 if (!(i & rdev->qpmask)) in c4iw_init_qid_table()
51 c4iw_id_free(&rdev->resource.qid_table, i); in c4iw_init_qid_table()
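
The seeding loop above frees only the IDs whose low qpmask bits are zero, so the qid_table hands out one group base per allocation and each base implicitly covers the qpmask + 1 QIDs behind it. A minimal userspace sketch of that masking, using a hypothetical qpmask value (the driver reads the real one from rdev->qpmask):

#include <stdio.h>

int main(void)
{
        /*
         * Hypothetical mask: qpmask = 3 means QIDs come in aligned groups of
         * qpmask + 1 = 4, mirroring the seeding loop above that only frees
         * IDs with the low mask bits clear ("!(i & rdev->qpmask)").
         */
        unsigned int qpmask = 3;
        unsigned int start = 1024, size = 16;
        unsigned int i;

        for (i = start; i < start + size; i++)
                printf("qid 0x%x: %s\n", i,
                       (i & qpmask) ? "reserved as part of a group" :
                                      "freed into the table (group base)");
        return 0;
}
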
60 err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, in c4iw_init_resource()
67 err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, in c4iw_init_resource()
72 err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, in c4iw_init_resource()
75 err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, in c4iw_init_resource()
81 c4iw_id_table_free(&rdev->resource.pdid_table); in c4iw_init_resource()
83 c4iw_id_table_free(&rdev->resource.qid_table); in c4iw_init_resource()
85 c4iw_id_table_free(&rdev->resource.tpt_table); in c4iw_init_resource()
87 return -ENOMEM; in c4iw_init_resource()
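
c4iw_init_resource() tears down the already-created tables in reverse order when a later allocation fails. A standalone sketch of that unwind idiom, with placeholder alloc_table()/free_table() helpers standing in for c4iw_id_table_alloc()/c4iw_id_table_free() and only three of the tables shown:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Placeholder tables standing in for the tpt/qid/pdid ID tables. */
static void *alloc_table(const char *name)
{
        printf("alloc %s\n", name);
        return malloc(1);
}

static void free_table(void *t, const char *name)
{
        printf("free %s\n", name);
        free(t);
}

static int init_resource(void)
{
        void *tpt, *qid, *pdid;

        tpt = alloc_table("tpt");
        if (!tpt)
                goto tpt_err;
        qid = alloc_table("qid");
        if (!qid)
                goto qid_err;
        pdid = alloc_table("pdid");
        if (!pdid)
                goto pdid_err;
        return 0;

pdid_err:
        free_table(qid, "qid");
qid_err:
        free_table(tpt, "tpt");
tpt_err:
        return -ENOMEM;
}

int main(void)
{
        return init_resource() ? EXIT_FAILURE : EXIT_SUCCESS;
}
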
97 if (entry == (u32)(-1)) in c4iw_get_resource()
104 pr_debug("entry 0x%x\n", entry); in c4iw_put_resource()
114 mutex_lock(&uctx->lock); in c4iw_get_cqid()
115 if (!list_empty(&uctx->cqids)) { in c4iw_get_cqid()
116 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, in c4iw_get_cqid()
118 list_del(&entry->entry); in c4iw_get_cqid()
119 qid = entry->qid; in c4iw_get_cqid()
122 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_cqid()
125 mutex_lock(&rdev->stats.lock); in c4iw_get_cqid()
126 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_cqid()
127 mutex_unlock(&rdev->stats.lock); in c4iw_get_cqid()
128 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
132 entry->qid = i; in c4iw_get_cqid()
133 list_add_tail(&entry->entry, &uctx->cqids); in c4iw_get_cqid()
143 entry->qid = qid; in c4iw_get_cqid()
144 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_get_cqid()
145 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
149 entry->qid = i; in c4iw_get_cqid()
150 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_get_cqid()
154 mutex_unlock(&uctx->lock); in c4iw_get_cqid()
155 pr_debug("qid 0x%x\n", qid); in c4iw_get_cqid()
156 mutex_lock(&rdev->stats.lock); in c4iw_get_cqid()
157 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_cqid()
158 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_cqid()
159 mutex_unlock(&rdev->stats.lock); in c4iw_get_cqid()
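
In c4iw_get_cqid() the global qid_table supplies only the group base; the "for (i = qid + 1; i & rdev->qpmask; i++)" loops then walk the remaining IDs of that aligned group and park them on the per-context cqids/qpids free lists so later requests can skip the global table. A small sketch of how such a loop enumerates the rest of a group (qpmask value hypothetical):

#include <stdio.h>

int main(void)
{
        unsigned int qpmask = 7;        /* hypothetical: groups of 8 QIDs */
        unsigned int qid = 0x1000;      /* group base handed out by the table */
        unsigned int i;

        printf("qid 0x%x -> returned to the caller\n", qid);
        /*
         * Same termination condition as the driver loop: stop when the low
         * bits wrap to zero, i.e. at the start of the next aligned group.
         */
        for (i = qid + 1; i & qpmask; i++)
                printf("qid 0x%x -> cached on a per-context free list\n", i);
        return 0;
}
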
171 pr_debug("qid 0x%x\n", qid); in c4iw_put_cqid()
172 entry->qid = qid; in c4iw_put_cqid()
173 mutex_lock(&uctx->lock); in c4iw_put_cqid()
174 list_add_tail(&entry->entry, &uctx->cqids); in c4iw_put_cqid()
175 mutex_unlock(&uctx->lock); in c4iw_put_cqid()
184 mutex_lock(&uctx->lock); in c4iw_get_qpid()
185 if (!list_empty(&uctx->qpids)) { in c4iw_get_qpid()
186 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, in c4iw_get_qpid()
188 list_del(&entry->entry); in c4iw_get_qpid()
189 qid = entry->qid; in c4iw_get_qpid()
192 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_qpid()
194 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
195 rdev->stats.qid.fail++; in c4iw_get_qpid()
196 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
199 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
200 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_qpid()
201 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
202 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_qpid()
206 entry->qid = i; in c4iw_get_qpid()
207 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_get_qpid()
217 entry->qid = qid; in c4iw_get_qpid()
218 list_add_tail(&entry->entry, &uctx->cqids); in c4iw_get_qpid()
219 for (i = qid + 1; i & rdev->qpmask; i++) { in c4iw_get_qpid()
223 entry->qid = i; in c4iw_get_qpid()
224 list_add_tail(&entry->entry, &uctx->cqids); in c4iw_get_qpid()
228 mutex_unlock(&uctx->lock); in c4iw_get_qpid()
229 pr_debug("qid 0x%x\n", qid); in c4iw_get_qpid()
230 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
231 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_qpid()
232 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_qpid()
233 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
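
c4iw_get_qpid() mirrors c4iw_get_cqid(), and both update the same per-resource counters under stats.lock: cur for current usage, max as a running high-water mark, and fail for refused requests. A minimal sketch of that bookkeeping (the struct and helper names here are illustrative, not the driver's):

#include <stdio.h>

/* Per-resource counters: current usage, high-water mark, failures. */
struct res_stats {
        unsigned long cur, max, fail;
};

static void account_alloc(struct res_stats *s, unsigned long amount, int ok)
{
        if (!ok) {
                s->fail++;
                return;
        }
        s->cur += amount;
        if (s->cur > s->max)    /* track the high-water mark */
                s->max = s->cur;
}

static void account_free(struct res_stats *s, unsigned long amount)
{
        s->cur -= amount;
}

int main(void)
{
        struct res_stats qid = { 0, 0, 0 };

        account_alloc(&qid, 4, 1);
        account_alloc(&qid, 4, 1);
        account_free(&qid, 4);
        account_alloc(&qid, 4, 0);      /* a failed allocation */
        printf("cur=%lu max=%lu fail=%lu\n", qid.cur, qid.max, qid.fail);
        return 0;
}
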
245 pr_debug("qid 0x%x\n", qid); in c4iw_put_qpid()
246 entry->qid = qid; in c4iw_put_qpid()
247 mutex_lock(&uctx->lock); in c4iw_put_qpid()
248 list_add_tail(&entry->entry, &uctx->qpids); in c4iw_put_qpid()
249 mutex_unlock(&uctx->lock); in c4iw_put_qpid()
254 c4iw_id_table_free(&rscp->tpt_table); in c4iw_destroy_resource()
255 c4iw_id_table_free(&rscp->qid_table); in c4iw_destroy_resource()
256 c4iw_id_table_free(&rscp->pdid_table); in c4iw_destroy_resource()
267 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); in c4iw_pblpool_alloc()
268 pr_debug("addr 0x%x size %d\n", (u32)addr, size); in c4iw_pblpool_alloc()
269 mutex_lock(&rdev->stats.lock); in c4iw_pblpool_alloc()
271 rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); in c4iw_pblpool_alloc()
272 if (rdev->stats.pbl.cur > rdev->stats.pbl.max) in c4iw_pblpool_alloc()
273 rdev->stats.pbl.max = rdev->stats.pbl.cur; in c4iw_pblpool_alloc()
274 kref_get(&rdev->pbl_kref); in c4iw_pblpool_alloc()
276 rdev->stats.pbl.fail++; in c4iw_pblpool_alloc()
277 mutex_unlock(&rdev->stats.lock); in c4iw_pblpool_alloc()
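
The PBL accounting rounds every request up to the pool's minimum granule (1 << MIN_PBL_SHIFT) before adjusting stats.pbl.cur, so the counters track what the gen_pool actually consumed rather than the raw request size. A sketch of that rounding with a placeholder shift value:

#include <stdio.h>

/* Kernel-style roundup() for illustration: round x up to a multiple of y. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        unsigned int shift = 8;         /* placeholder granule shift */
        unsigned int sizes[] = { 1, 200, 256, 257, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %4u -> accounted as %4u\n",
                       sizes[i], roundup(sizes[i], 1u << shift));
        return 0;
}
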
286 gen_pool_destroy(rdev->pbl_pool); in destroy_pblpool()
287 complete(&rdev->pbl_compl); in destroy_pblpool()
292 pr_debug("addr 0x%x size %d\n", addr, size); in c4iw_pblpool_free()
293 mutex_lock(&rdev->stats.lock); in c4iw_pblpool_free()
294 rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); in c4iw_pblpool_free()
295 mutex_unlock(&rdev->stats.lock); in c4iw_pblpool_free()
296 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); in c4iw_pblpool_free()
297 kref_put(&rdev->pbl_kref, destroy_pblpool); in c4iw_pblpool_free()
304 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); in c4iw_pblpool_create()
305 if (!rdev->pbl_pool) in c4iw_pblpool_create()
306 return -ENOMEM; in c4iw_pblpool_create()
308 pbl_start = rdev->lldi.vr->pbl.start; in c4iw_pblpool_create()
309 pbl_chunk = rdev->lldi.vr->pbl.size; in c4iw_pblpool_create()
313 pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); in c4iw_pblpool_create()
314 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { in c4iw_pblpool_create()
315 pr_debug("failed to add PBL chunk (%x/%x)\n", in c4iw_pblpool_create()
318 pr_warn("Failed to add all PBL chunks (%x/%x)\n", in c4iw_pblpool_create()
319 pbl_start, pbl_top - pbl_start); in c4iw_pblpool_create()
324 pr_debug("added PBL chunk (%x/%x)\n", in c4iw_pblpool_create()
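
c4iw_pblpool_create() feeds the PBL memory region into the gen_pool in chunks, halving the chunk size whenever gen_pool_add() refuses one and giving up once the chunk drops below a floor. A self-contained sketch of that back-off loop, with a stand-in add_chunk() that rejects anything larger than an arbitrary limit and a placeholder floor value:

#include <stdio.h>
#include <stdbool.h>

#define MIN_CHUNK 0x1000u       /* placeholder floor, not the driver's value */

/* Stand-in for gen_pool_add(): pretend the pool only accepts small chunks. */
static bool add_chunk(unsigned long start, unsigned long len)
{
        return len <= 0x4000;
}

int main(void)
{
        unsigned long start = 0x10000, top = 0x20000 - 1;
        unsigned long chunk = top - start + 1;

        while (start < top) {
                if (chunk > top - start + 1)
                        chunk = top - start + 1;
                if (!add_chunk(start, chunk)) {
                        printf("failed to add chunk (%lx/%lx)\n", start, chunk);
                        if (chunk <= MIN_CHUNK) {
                                printf("giving up\n");
                                break;
                        }
                        chunk >>= 1;    /* back off and retry with half */
                } else {
                        printf("added chunk (%lx/%lx)\n", start, chunk);
                        start += chunk;
                }
        }
        return 0;
}
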
335 kref_put(&rdev->pbl_kref, destroy_pblpool); in c4iw_pblpool_destroy()
346 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); in c4iw_rqtpool_alloc()
347 pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6); in c4iw_rqtpool_alloc()
350 pci_name(rdev->lldi.pdev)); in c4iw_rqtpool_alloc()
351 mutex_lock(&rdev->stats.lock); in c4iw_rqtpool_alloc()
353 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); in c4iw_rqtpool_alloc()
354 if (rdev->stats.rqt.cur > rdev->stats.rqt.max) in c4iw_rqtpool_alloc()
355 rdev->stats.rqt.max = rdev->stats.rqt.cur; in c4iw_rqtpool_alloc()
356 kref_get(&rdev->rqt_kref); in c4iw_rqtpool_alloc()
358 rdev->stats.rqt.fail++; in c4iw_rqtpool_alloc()
359 mutex_unlock(&rdev->stats.lock); in c4iw_rqtpool_alloc()
368 gen_pool_destroy(rdev->rqt_pool); in destroy_rqtpool()
369 complete(&rdev->rqt_compl); in destroy_rqtpool()
374 pr_debug("addr 0x%x size %d\n", addr, size << 6); in c4iw_rqtpool_free()
375 mutex_lock(&rdev->stats.lock); in c4iw_rqtpool_free()
376 rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); in c4iw_rqtpool_free()
377 mutex_unlock(&rdev->stats.lock); in c4iw_rqtpool_free()
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); in c4iw_rqtpool_free()
379 kref_put(&rdev->rqt_kref, destroy_rqtpool); in c4iw_rqtpool_free()
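
Both the PBL and RQT pools tie their lifetime to a kref: every allocation takes a reference, every free drops one, and the destroy path drops the initial reference and then waits for the completion that destroy_pblpool()/destroy_rqtpool() signal once the count hits zero. A toy single-threaded sketch of that destroy-on-last-put pattern:

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Toy reference-counted pool: the backing store is torn down only when the
 * last user drops its reference, standing in for kref_put() plus the
 * completion the destroy path waits on.  Single-threaded demo only.
 */
struct pool {
        atomic_int refs;
        bool destroyed;
};

static void pool_get(struct pool *p)
{
        atomic_fetch_add(&p->refs, 1);
}

static void pool_put(struct pool *p)
{
        if (atomic_fetch_sub(&p->refs, 1) == 1) {
                p->destroyed = true;    /* plays the role of complete() */
                printf("last reference dropped: pool destroyed\n");
        }
}

int main(void)
{
        struct pool p = { .destroyed = false };

        atomic_init(&p.refs, 1);        /* initial reference held at create time */
        pool_get(&p);                   /* an outstanding allocation takes a ref */
        pool_put(&p);                   /* destroy path drops the initial ref */
        printf("pool %s destroyed yet\n", p.destroyed ? "already" : "not");
        pool_put(&p);                   /* freeing the allocation releases the pool */
        return 0;
}
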
387 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); in c4iw_rqtpool_create()
388 if (!rdev->rqt_pool) in c4iw_rqtpool_create()
389 return -ENOMEM; in c4iw_rqtpool_create()
395 if (rdev->lldi.vr->srq.size) in c4iw_rqtpool_create()
398 rqt_start = rdev->lldi.vr->rq.start + skip; in c4iw_rqtpool_create()
399 rqt_chunk = rdev->lldi.vr->rq.size - skip; in c4iw_rqtpool_create()
403 rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); in c4iw_rqtpool_create()
404 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { in c4iw_rqtpool_create()
405 pr_debug("failed to add RQT chunk (%x/%x)\n", in c4iw_rqtpool_create()
408 pr_warn("Failed to add all RQT chunks (%x/%x)\n", in c4iw_rqtpool_create()
409 rqt_start, rqt_top - rqt_start); in c4iw_rqtpool_create()
414 pr_debug("added RQT chunk (%x/%x)\n", in c4iw_rqtpool_create()
424 kref_put(&rdev->rqt_kref, destroy_rqtpool); in c4iw_rqtpool_destroy()
431 idx = c4iw_id_alloc(&rdev->resource.srq_table); in c4iw_alloc_srq_idx()
432 mutex_lock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
433 if (idx == -1) { in c4iw_alloc_srq_idx()
434 rdev->stats.srqt.fail++; in c4iw_alloc_srq_idx()
435 mutex_unlock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
436 return -ENOMEM; in c4iw_alloc_srq_idx()
438 rdev->stats.srqt.cur++; in c4iw_alloc_srq_idx()
439 if (rdev->stats.srqt.cur > rdev->stats.srqt.max) in c4iw_alloc_srq_idx()
440 rdev->stats.srqt.max = rdev->stats.srqt.cur; in c4iw_alloc_srq_idx()
441 mutex_unlock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
447 c4iw_id_free(&rdev->resource.srq_table, idx); in c4iw_free_srq_idx()
448 mutex_lock(&rdev->stats.lock); in c4iw_free_srq_idx()
449 rdev->stats.srqt.cur--; in c4iw_free_srq_idx()
450 mutex_unlock(&rdev->stats.lock); in c4iw_free_srq_idx()
454 * On-Chip QP Memory.
460 unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); in c4iw_ocqp_pool_alloc()
461 pr_debug("addr 0x%x size %d\n", (u32)addr, size); in c4iw_ocqp_pool_alloc()
463 mutex_lock(&rdev->stats.lock); in c4iw_ocqp_pool_alloc()
464 rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); in c4iw_ocqp_pool_alloc()
465 if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) in c4iw_ocqp_pool_alloc()
466 rdev->stats.ocqp.max = rdev->stats.ocqp.cur; in c4iw_ocqp_pool_alloc()
467 mutex_unlock(&rdev->stats.lock); in c4iw_ocqp_pool_alloc()
474 pr_debug("addr 0x%x size %d\n", addr, size); in c4iw_ocqp_pool_free()
475 mutex_lock(&rdev->stats.lock); in c4iw_ocqp_pool_free()
476 rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); in c4iw_ocqp_pool_free()
477 mutex_unlock(&rdev->stats.lock); in c4iw_ocqp_pool_free()
478 gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); in c4iw_ocqp_pool_free()
485 rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); in c4iw_ocqp_pool_create()
486 if (!rdev->ocqp_pool) in c4iw_ocqp_pool_create()
487 return -ENOMEM; in c4iw_ocqp_pool_create()
489 start = rdev->lldi.vr->ocq.start; in c4iw_ocqp_pool_create()
490 chunk = rdev->lldi.vr->ocq.size; in c4iw_ocqp_pool_create()
494 chunk = min(top - start + 1, chunk); in c4iw_ocqp_pool_create()
495 if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { in c4iw_ocqp_pool_create()
496 pr_debug("failed to add OCQP chunk (%x/%x)\n", in c4iw_ocqp_pool_create()
499 pr_warn("Failed to add all OCQP chunks (%x/%x)\n", in c4iw_ocqp_pool_create()
500 start, top - start); in c4iw_ocqp_pool_create()
505 pr_debug("added OCQP chunk (%x/%x)\n", in c4iw_ocqp_pool_create()
515 gen_pool_destroy(rdev->ocqp_pool); in c4iw_ocqp_pool_destroy()