Lines matching the full identifier: ent
115 struct mlx5_ib_dev *dev = async_create->ent->dev; in mlx5_ib_create_mkey_cb()
128 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
150 static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey) in push_mkey_locked() argument
152 unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE; in push_mkey_locked()
155 lockdep_assert_held(&ent->mkeys_queue.lock); in push_mkey_locked()
156 if (ent->mkeys_queue.ci >= in push_mkey_locked()
157 ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) { in push_mkey_locked()
161 ent->mkeys_queue.num_pages++; in push_mkey_locked()
162 list_add_tail(&page->list, &ent->mkeys_queue.pages_list); in push_mkey_locked()
164 page = list_last_entry(&ent->mkeys_queue.pages_list, in push_mkey_locked()
169 ent->mkeys_queue.ci++; in push_mkey_locked()
173 static int pop_mkey_locked(struct mlx5_cache_ent *ent) in pop_mkey_locked() argument
175 unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE; in pop_mkey_locked()
179 lockdep_assert_held(&ent->mkeys_queue.lock); in pop_mkey_locked()
180 last_page = list_last_entry(&ent->mkeys_queue.pages_list, in pop_mkey_locked()
184 ent->mkeys_queue.ci--; in pop_mkey_locked()
185 if (ent->mkeys_queue.num_pages > 1 && !tmp) { in pop_mkey_locked()
187 ent->mkeys_queue.num_pages--; in pop_mkey_locked()
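
The matches above come from a pair of push/pop helpers that keep free mkeys in a list of fixed-size pages, with mkeys_queue.ci counting the stored keys and num_pages tracking the allocated pages. A minimal user-space model of that paged stack, assuming a per-page key array that the matched lines do not show (key_page, key_queue and KEYS_PER_PAGE are names invented for this sketch):

#include <stdio.h>
#include <stdlib.h>

#define KEYS_PER_PAGE 4 /* stand-in for NUM_MKEYS_PER_PAGE */

struct key_page {
	unsigned int keys[KEYS_PER_PAGE];
	struct key_page *prev; /* the kernel uses a list_head instead */
};

struct key_queue {
	struct key_page *last;   /* top-of-stack page */
	unsigned long ci;        /* number of keys currently stored */
	unsigned long num_pages;
};

/* Mirrors the shape of push_mkey_locked(): grow by one page only when the
 * existing pages are full, then store at slot ci % KEYS_PER_PAGE. */
static int push_key(struct key_queue *q, unsigned int key)
{
	unsigned long slot = q->ci % KEYS_PER_PAGE;
	struct key_page *page;

	if (q->ci >= q->num_pages * KEYS_PER_PAGE) {
		page = calloc(1, sizeof(*page));
		if (!page)
			return -1;
		page->prev = q->last;
		q->last = page;
		q->num_pages++;
	} else {
		page = q->last;
	}
	page->keys[slot] = key;
	q->ci++;
	return 0;
}

/* Mirrors pop_mkey_locked(): return the most recently pushed key and free
 * the top page once it empties, always keeping at least one page around. */
static unsigned int pop_key(struct key_queue *q)
{
	unsigned long slot = (q->ci - 1) % KEYS_PER_PAGE;
	struct key_page *page = q->last;
	unsigned int key = page->keys[slot];

	q->ci--;
	if (q->num_pages > 1 && slot == 0) {
		q->last = page->prev;
		q->num_pages--;
		free(page);
	}
	return key;
}

int main(void)
{
	struct key_queue q = { 0 };

	for (unsigned int i = 1; i <= 10; i++)
		push_key(&q, i);
	while (q.ci)
		printf("popped %u\n", pop_key(&q));
	free(q.last);
	return 0;
}
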
197 struct mlx5_cache_ent *ent = mkey_out->ent; in create_mkey_callback() local
198 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
204 spin_lock_irqsave(&ent->mkeys_queue.lock, flags); in create_mkey_callback()
205 ent->pending--; in create_mkey_callback()
207 spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); in create_mkey_callback()
216 spin_lock_irqsave(&ent->mkeys_queue.lock, flags); in create_mkey_callback()
217 push_mkey_locked(ent, mkey_out->mkey); in create_mkey_callback()
218 ent->pending--; in create_mkey_callback()
220 queue_adjust_cache_locked(ent); in create_mkey_callback()
221 spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); in create_mkey_callback()
244 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) in set_cache_mkc() argument
246 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
247 ent->dev->umrc.pd); in set_cache_mkc()
250 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
252 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
253 MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); in set_cache_mkc()
256 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
257 ent->rb_key.ndescs)); in set_cache_mkc()
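
set_cache_mkc() programs the mkey context from the entry's rb_key; the two access_mode MLX5_SET() lines split a 5-bit access mode into the context's access_mode_1_0 and access_mode_4_2 fields. A tiny stand-alone illustration of that bit split (the helper name and the example value are made up for the sketch):

#include <stdio.h>

/* Split a 5-bit access mode the way the fragments above do: bits [1:0] go
 * to access_mode_1_0, bits [4:2] go to access_mode_4_2. */
static void split_access_mode(unsigned int mode,
			      unsigned int *lo_1_0, unsigned int *hi_4_2)
{
	*lo_1_0 = mode & 0x3;
	*hi_4_2 = (mode >> 2) & 0x7;
}

int main(void)
{
	unsigned int lo, hi;

	split_access_mode(0x5 /* example 5-bit mode */, &lo, &hi);
	printf("access_mode_1_0=%u access_mode_4_2=%u\n", lo, hi);
	return 0;
}
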
262 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
276 set_cache_mkc(ent, mkc); in add_keys()
277 async_create->ent = ent; in add_keys()
279 spin_lock_irq(&ent->mkeys_queue.lock); in add_keys()
280 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
284 ent->pending++; in add_keys()
285 spin_unlock_irq(&ent->mkeys_queue.lock); in add_keys()
289 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
297 spin_lock_irq(&ent->mkeys_queue.lock); in add_keys()
298 ent->pending--; in add_keys()
300 spin_unlock_irq(&ent->mkeys_queue.lock); in add_keys()
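
add_keys() issues mkey creation asynchronously and, together with create_mkey_callback() above, uses ent->pending under mkeys_queue.lock to cap the number of commands in flight (MAX_PENDING_REG_MR). A stripped-down user-space model of that reserve/submit/release pattern, with a stubbed submit function and invented names:

#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 4 /* stand-in for MAX_PENDING_REG_MR */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int pending;

/* Placeholder for the asynchronous mkey-create submission. */
static int submit_async_create(void) { return 0; }

/* Reserve a pending slot under the lock, submit outside the lock, and give
 * the slot back if submission fails, mirroring the shape of add_keys(). */
static int add_one_key(void)
{
	int err;

	pthread_mutex_lock(&lock);
	if (pending >= MAX_PENDING) {
		pthread_mutex_unlock(&lock);
		return -1; /* caller backs off, like the -EAGAIN path */
	}
	pending++;
	pthread_mutex_unlock(&lock);

	err = submit_async_create();
	if (err) {
		pthread_mutex_lock(&lock);
		pending--; /* completion will never run, release the slot */
		pthread_mutex_unlock(&lock);
	}
	return err;
}

/* The completion side (create_mkey_callback() in the fragments above) pushes
 * the new key and then drops the pending count under the same lock. */
static void on_create_done(unsigned int key)
{
	pthread_mutex_lock(&lock);
	/* push_key(&queue, key); -- see the paged-stack sketch earlier */
	(void)key;
	pending--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("add_one_key -> %d (pending=%u)\n", add_one_key(), pending);
	on_create_done(0x1234);
	return 0;
}
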
306 static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey) in create_cache_mkey() argument
317 set_cache_mkc(ent, mkc); in create_cache_mkey()
319 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); in create_cache_mkey()
323 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mkey()
329 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
333 lockdep_assert_held(&ent->mkeys_queue.lock); in remove_cache_mr_locked()
334 if (!ent->mkeys_queue.ci) in remove_cache_mr_locked()
336 mkey = pop_mkey_locked(ent); in remove_cache_mr_locked()
337 spin_unlock_irq(&ent->mkeys_queue.lock); in remove_cache_mr_locked()
338 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); in remove_cache_mr_locked()
339 spin_lock_irq(&ent->mkeys_queue.lock); in remove_cache_mr_locked()
342 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
344 __acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock) in resize_available_mrs()
348 lockdep_assert_held(&ent->mkeys_queue.lock); in resize_available_mrs()
352 target = ent->limit * 2; in resize_available_mrs()
353 if (target == ent->pending + ent->mkeys_queue.ci) in resize_available_mrs()
355 if (target > ent->pending + ent->mkeys_queue.ci) { in resize_available_mrs()
356 u32 todo = target - (ent->pending + ent->mkeys_queue.ci); in resize_available_mrs()
358 spin_unlock_irq(&ent->mkeys_queue.lock); in resize_available_mrs()
359 err = add_keys(ent, todo); in resize_available_mrs()
362 spin_lock_irq(&ent->mkeys_queue.lock); in resize_available_mrs()
369 remove_cache_mr_locked(ent); in resize_available_mrs()
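
resize_available_mrs() drives available-plus-pending mkeys toward a target: the limit_fill caller (limit_write() below) asks for 2 * ent->limit, growth goes through add_keys(), and shrinking removes one mkey per iteration via remove_cache_mr_locked(). A compact model of that convergence loop, with the grow/shrink steps stubbed out:

#include <stdio.h>

static unsigned long available; /* models ent->mkeys_queue.ci */
static unsigned long pending;   /* creations in flight        */

static int add_keys_stub(unsigned long n) { available += n; return 0; }
static void remove_one_stub(void)         { available--; }

/* Converge available + pending on the target, growing in one batch and
 * shrinking one key at a time. */
static int resize_to(unsigned long target)
{
	for (;;) {
		unsigned long have = available + pending;

		if (have == target)
			return 0;
		if (have < target) {
			if (add_keys_stub(target - have))
				return -1;
		} else {
			remove_one_stub();
		}
	}
}

int main(void)
{
	available = 3;
	resize_to(10);
	printf("grown to %lu\n", available);
	resize_to(4);
	printf("shrunk to %lu\n", available);
	return 0;
}
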
377 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
390 spin_lock_irq(&ent->mkeys_queue.lock); in size_write()
391 if (target < ent->in_use) { in size_write()
395 target = target - ent->in_use; in size_write()
396 if (target < ent->limit || target > ent->limit*2) { in size_write()
400 err = resize_available_mrs(ent, target, false); in size_write()
403 spin_unlock_irq(&ent->mkeys_queue.lock); in size_write()
408 spin_unlock_irq(&ent->mkeys_queue.lock); in size_write()
415 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
420 ent->mkeys_queue.ci + ent->in_use); in size_read()
437 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
449 spin_lock_irq(&ent->mkeys_queue.lock); in limit_write()
450 ent->limit = var; in limit_write()
451 err = resize_available_mrs(ent, 0, true); in limit_write()
452 spin_unlock_irq(&ent->mkeys_queue.lock); in limit_write()
461 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
465 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
481 struct mlx5_cache_ent *ent; in someone_adding() local
487 ent = rb_entry(node, struct mlx5_cache_ent, node); in someone_adding()
488 spin_lock_irq(&ent->mkeys_queue.lock); in someone_adding()
489 ret = ent->mkeys_queue.ci < ent->limit; in someone_adding()
490 spin_unlock_irq(&ent->mkeys_queue.lock); in someone_adding()
505 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
507 lockdep_assert_held(&ent->mkeys_queue.lock); in queue_adjust_cache_locked()
509 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) in queue_adjust_cache_locked()
511 if (ent->mkeys_queue.ci < ent->limit) { in queue_adjust_cache_locked()
512 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
513 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
514 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
515 ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
520 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
521 } else if (ent->mkeys_queue.ci == 2 * ent->limit) { in queue_adjust_cache_locked()
522 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
523 } else if (ent->mkeys_queue.ci > 2 * ent->limit) { in queue_adjust_cache_locked()
525 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
526 if (ent->pending) in queue_adjust_cache_locked()
527 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
530 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
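
queue_adjust_cache_locked() appears to implement a low/high watermark scheme: dropping below ent->limit starts refilling, the fill_to_high_water flag keeps the refill going until roughly 2 * ent->limit, and exceeding 2 * ent->limit schedules trimming. A side-effect-free model of that decision logic (the enum and struct are invented; the real function schedules delayed work instead of returning an action):

#include <stdbool.h>
#include <stdio.h>

enum cache_action { DO_NOTHING, SCHEDULE_FILL, SCHEDULE_TRIM };

struct ent_state {
	unsigned long available;   /* ent->mkeys_queue.ci */
	unsigned long pending;     /* ent->pending        */
	unsigned long limit;       /* ent->limit          */
	bool fill_to_high_water;   /* hysteresis flag     */
};

/* Decide what the delayed work should do, mirroring the watermark checks in
 * the queue_adjust_cache_locked() fragments above. */
static enum cache_action adjust(struct ent_state *s)
{
	if (s->available < s->limit) {
		s->fill_to_high_water = true;  /* low watermark crossed */
		return SCHEDULE_FILL;
	}
	if (s->fill_to_high_water &&
	    s->available + s->pending < 2 * s->limit)
		return SCHEDULE_FILL;          /* keep filling to 2*limit */
	if (s->available == 2 * s->limit) {
		s->fill_to_high_water = false; /* high watermark reached */
		return DO_NOTHING;
	}
	if (s->available > 2 * s->limit) {
		s->fill_to_high_water = false;
		return SCHEDULE_TRIM;          /* shrink back toward 2*limit */
	}
	return DO_NOTHING;
}

int main(void)
{
	struct ent_state s = { .available = 3, .pending = 0, .limit = 8 };

	printf("action=%d fill=%d\n", adjust(&s), s.fill_to_high_water);
	s.available = 20;
	printf("action=%d fill=%d\n", adjust(&s), s.fill_to_high_water);
	return 0;
}
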
534 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) in clean_keys() argument
538 spin_lock_irq(&ent->mkeys_queue.lock); in clean_keys()
539 while (ent->mkeys_queue.ci) { in clean_keys()
540 mkey = pop_mkey_locked(ent); in clean_keys()
541 spin_unlock_irq(&ent->mkeys_queue.lock); in clean_keys()
543 spin_lock_irq(&ent->mkeys_queue.lock); in clean_keys()
545 ent->tmp_cleanup_scheduled = false; in clean_keys()
546 spin_unlock_irq(&ent->mkeys_queue.lock); in clean_keys()
549 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
551 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
555 spin_lock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
556 if (ent->disabled) in __cache_work_func()
559 if (ent->fill_to_high_water && in __cache_work_func()
560 ent->mkeys_queue.ci + ent->pending < 2 * ent->limit && in __cache_work_func()
562 spin_unlock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
563 err = add_keys(ent, 1); in __cache_work_func()
564 spin_lock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
565 if (ent->disabled) in __cache_work_func()
578 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
582 } else if (ent->mkeys_queue.ci > 2 * ent->limit) { in __cache_work_func()
597 spin_unlock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
601 spin_lock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
602 if (ent->disabled) in __cache_work_func()
605 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
608 remove_cache_mr_locked(ent); in __cache_work_func()
609 queue_adjust_cache_locked(ent); in __cache_work_func()
612 spin_unlock_irq(&ent->mkeys_queue.lock); in __cache_work_func()
617 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
619 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
621 if (ent->is_tmp) in delayed_cache_work_func()
622 clean_keys(ent->dev, ent); in delayed_cache_work_func()
624 __cache_work_func(ent); in delayed_cache_work_func()
653 struct mlx5_cache_ent *ent) in mlx5_cache_ent_insert() argument
663 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
673 rb_link_node(&ent->node, parent, new); in mlx5_cache_ent_insert()
674 rb_insert_color(&ent->node, &cache->rb_root); in mlx5_cache_ent_insert()
689 * Find the smallest ent with order >= requested_order. in mkey_cache_ent_from_rb_key()
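
The cache entries are kept in a red-black tree ordered by rb_key (mlx5_cache_ent_insert() above), and the comment matched here describes the lookup policy: take the smallest entry whose key is greater than or equal to the requested one. A generic lower-bound walk over a plain binary search tree, using an int key in place of the composite rb_key:

#include <stddef.h>

struct node {
	int key;
	struct node *left, *right;
};

/* Return the node with the smallest key >= wanted, or NULL if every key is
 * smaller; the same walk, done over the cache rb-tree, gives the entry the
 * comment above describes. */
static struct node *lower_bound(struct node *root, int wanted)
{
	struct node *best = NULL;

	while (root) {
		if (root->key >= wanted) {
			best = root;        /* candidate; look for a smaller one */
			root = root->left;
		} else {
			root = root->right; /* too small; go right */
		}
	}
	return best;
}
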
721 struct mlx5_cache_ent *ent, in _mlx5_mr_cache_alloc() argument
731 spin_lock_irq(&ent->mkeys_queue.lock); in _mlx5_mr_cache_alloc()
732 ent->in_use++; in _mlx5_mr_cache_alloc()
734 if (!ent->mkeys_queue.ci) { in _mlx5_mr_cache_alloc()
735 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
736 ent->miss++; in _mlx5_mr_cache_alloc()
737 spin_unlock_irq(&ent->mkeys_queue.lock); in _mlx5_mr_cache_alloc()
738 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
740 spin_lock_irq(&ent->mkeys_queue.lock); in _mlx5_mr_cache_alloc()
741 ent->in_use--; in _mlx5_mr_cache_alloc()
742 spin_unlock_irq(&ent->mkeys_queue.lock); in _mlx5_mr_cache_alloc()
747 mr->mmkey.key = pop_mkey_locked(ent); in _mlx5_mr_cache_alloc()
748 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
749 spin_unlock_irq(&ent->mkeys_queue.lock); in _mlx5_mr_cache_alloc()
751 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
753 mr->mmkey.rb_key = ent->rb_key; in _mlx5_mr_cache_alloc()
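
_mlx5_mr_cache_alloc() pops a cached mkey when the queue is non-empty and otherwise counts a miss and creates one synchronously, adjusting in_use either way. A compact single-threaded model of that hit/miss path (locking elided, creation stubbed):

#include <stdio.h>

static unsigned long ci;     /* cached keys available */
static unsigned long in_use; /* keys handed out       */
static unsigned long miss;   /* cache misses          */

static int create_key_sync(unsigned int *key) { *key = 0xabcd; return 0; } /* stub */
static unsigned int pop_cached_key(void) { ci--; return 0x1000 + ci; }

static int cache_alloc(unsigned int *key)
{
	in_use++;
	if (!ci) {
		/* Miss: nothing cached, create one synchronously and let the
		 * background work refill the queue. */
		miss++;
		if (create_key_sync(key)) {
			in_use--;
			return -1;
		}
		return 0;
	}
	/* Hit: hand out the most recently cached key. */
	*key = pop_cached_key();
	return 0;
}

int main(void)
{
	unsigned int key;

	cache_alloc(&key);  /* miss: ci == 0 */
	ci = 2;             /* pretend the worker refilled two keys */
	cache_alloc(&key);  /* hit */
	printf("in_use=%lu miss=%lu ci=%lu\n", in_use, miss, ci);
	return 0;
}
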
792 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); in mlx5_mr_cache_alloc() local
794 if (!ent) in mlx5_mr_cache_alloc()
797 return _mlx5_mr_cache_alloc(dev, ent, access_flags); in mlx5_mr_cache_alloc()
810 struct mlx5_cache_ent *ent) in mlx5_mkey_cache_debugfs_add_ent() argument
812 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
818 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
821 sprintf(ent->name, "%d", order); in mlx5_mkey_cache_debugfs_add_ent()
822 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); in mlx5_mkey_cache_debugfs_add_ent()
823 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mkey_cache_debugfs_add_ent()
824 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mkey_cache_debugfs_add_ent()
825 debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci); in mlx5_mkey_cache_debugfs_add_ent()
826 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mkey_cache_debugfs_add_ent()
847 static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent) in mlx5r_mkeys_init() argument
854 INIT_LIST_HEAD(&ent->mkeys_queue.pages_list); in mlx5r_mkeys_init()
855 spin_lock_init(&ent->mkeys_queue.lock); in mlx5r_mkeys_init()
856 list_add_tail(&page->list, &ent->mkeys_queue.pages_list); in mlx5r_mkeys_init()
857 ent->mkeys_queue.num_pages++; in mlx5r_mkeys_init()
861 static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent) in mlx5r_mkeys_uninit() argument
865 WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1); in mlx5r_mkeys_uninit()
866 page = list_last_entry(&ent->mkeys_queue.pages_list, in mlx5r_mkeys_uninit()
877 struct mlx5_cache_ent *ent; in mlx5r_cache_create_ent_locked() local
881 ent = kzalloc(sizeof(*ent), GFP_KERNEL); in mlx5r_cache_create_ent_locked()
882 if (!ent) in mlx5r_cache_create_ent_locked()
885 ret = mlx5r_mkeys_init(ent); in mlx5r_cache_create_ent_locked()
888 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
889 ent->dev = dev; in mlx5r_cache_create_ent_locked()
890 ent->is_tmp = !persistent_entry; in mlx5r_cache_create_ent_locked()
892 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5r_cache_create_ent_locked()
894 ret = mlx5_cache_ent_insert(&dev->cache, ent); in mlx5r_cache_create_ent_locked()
907 ent->limit = dev->mdev->profile.mr_cache[order].limit; in mlx5r_cache_create_ent_locked()
909 ent->limit = 0; in mlx5r_cache_create_ent_locked()
911 mlx5_mkey_cache_debugfs_add_ent(dev, ent); in mlx5r_cache_create_ent_locked()
914 return ent; in mlx5r_cache_create_ent_locked()
916 mlx5r_mkeys_uninit(ent); in mlx5r_cache_create_ent_locked()
918 kfree(ent); in mlx5r_cache_create_ent_locked()
929 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_init() local
949 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); in mlx5_mkey_cache_init()
950 if (IS_ERR(ent)) { in mlx5_mkey_cache_init()
951 ret = PTR_ERR(ent); in mlx5_mkey_cache_init()
962 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_init()
963 spin_lock_irq(&ent->mkeys_queue.lock); in mlx5_mkey_cache_init()
964 queue_adjust_cache_locked(ent); in mlx5_mkey_cache_init()
965 spin_unlock_irq(&ent->mkeys_queue.lock); in mlx5_mkey_cache_init()
980 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_cleanup() local
988 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
989 spin_lock_irq(&ent->mkeys_queue.lock); in mlx5_mkey_cache_cleanup()
990 ent->disabled = true; in mlx5_mkey_cache_cleanup()
991 spin_unlock_irq(&ent->mkeys_queue.lock); in mlx5_mkey_cache_cleanup()
992 cancel_delayed_work(&ent->dwork); in mlx5_mkey_cache_cleanup()
1009 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
1011 clean_keys(dev, ent); in mlx5_mkey_cache_cleanup()
1012 rb_erase(&ent->node, root); in mlx5_mkey_cache_cleanup()
1013 mlx5r_mkeys_uninit(ent); in mlx5_mkey_cache_cleanup()
1014 kfree(ent); in mlx5_mkey_cache_cleanup()
1116 struct mlx5_cache_ent *ent; in alloc_cacheable_mr() local
1131 ent = mkey_cache_ent_from_rb_key(dev, rb_key); in alloc_cacheable_mr()
1136 if (!ent) { in alloc_cacheable_mr()
1147 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1954 struct mlx5_cache_ent *ent; in cache_ent_find_and_store() local
1964 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1965 if (ent) { in cache_ent_find_and_store()
1966 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1967 if (ent->disabled) { in cache_ent_find_and_store()
1971 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1978 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1980 if (IS_ERR(ent)) in cache_ent_find_and_store()
1981 return PTR_ERR(ent); in cache_ent_find_and_store()
1983 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
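
cache_ent_find_and_store() reuses an existing entry only when its rb_key matches the MR exactly (same ndescs) and the entry is still enabled; otherwise it creates a temporary, non-persistent entry so the mkey can still be parked in the cache. A small runnable illustration of that find-or-create decision (simplified integer key, stubbed lookup and create helpers):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int ndescs;
	int disabled;
	int is_tmp;
};

/* Stubs standing in for mkey_cache_ent_from_rb_key() and
 * mlx5r_cache_create_ent_locked(..., persistent_entry = false). */
static struct entry persistent = { .ndescs = 8 };

static struct entry *find_closest_entry(int ndescs)
{
	return ndescs <= persistent.ndescs ? &persistent : NULL;
}

static struct entry *create_temporary_entry(int ndescs)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (e) {
		e->ndescs = ndescs;
		e->is_tmp = 1;
	}
	return e;
}

/* The lookup returns the closest entry with key >= requested, so an exact
 * ndescs match is required before reusing it; a disabled exact match means
 * the mkey cannot be cached at all. */
static struct entry *find_or_create(int ndescs)
{
	struct entry *ent = find_closest_entry(ndescs);

	if (ent && ent->ndescs == ndescs)
		return ent->disabled ? NULL : ent;
	return create_temporary_entry(ndescs);
}

int main(void)
{
	printf("exact match -> tmp=%d\n", find_or_create(8)->is_tmp);
	printf("no match    -> tmp=%d\n", find_or_create(4)->is_tmp);
	return 0;
}
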
2023 struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; in mlx5_revoke_mr() local
2026 ent = mr->mmkey.cache_ent; in mlx5_revoke_mr()
2028 spin_lock_irq(&ent->mkeys_queue.lock); in mlx5_revoke_mr()
2029 if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { in mlx5_revoke_mr()
2030 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, in mlx5_revoke_mr()
2032 ent->tmp_cleanup_scheduled = true; in mlx5_revoke_mr()
2034 spin_unlock_irq(&ent->mkeys_queue.lock); in mlx5_revoke_mr()
2038 if (ent) { in mlx5_revoke_mr()
2039 spin_lock_irq(&ent->mkeys_queue.lock); in mlx5_revoke_mr()
2040 ent->in_use--; in mlx5_revoke_mr()
2042 spin_unlock_irq(&ent->mkeys_queue.lock); in mlx5_revoke_mr()
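
In mlx5_revoke_mr(), temporary entries arm their delayed cleanup work at most once: tmp_cleanup_scheduled prevents every MR destruction from re-arming the timer, and clean_keys() clears the flag when the deferred cleanup finally runs. A minimal model of that schedule-once pattern (helper names invented, the timer reduced to a printf):

#include <stdbool.h>
#include <stdio.h>

struct tmp_entry {
	bool is_tmp;
	bool cleanup_scheduled;
};

/* Stand-in for mod_delayed_work(wq, &ent->dwork, delay). */
static void arm_cleanup_timer(struct tmp_entry *e)
{
	(void)e;
	printf("cleanup timer armed\n");
}

/* Called every time an MR backed by this entry is destroyed; the timer is
 * armed only on the first call, mirroring the tmp_cleanup_scheduled check. */
static void on_mr_destroy(struct tmp_entry *e)
{
	if (e->is_tmp && !e->cleanup_scheduled) {
		arm_cleanup_timer(e);
		e->cleanup_scheduled = true;
	}
}

/* clean_keys() clears the flag once the deferred cleanup has run, so a later
 * burst of destroys can re-arm the timer (see the clean_keys() lines above). */
static void on_cleanup_done(struct tmp_entry *e)
{
	e->cleanup_scheduled = false;
}

int main(void)
{
	struct tmp_entry e = { .is_tmp = true };

	on_mr_destroy(&e);  /* arms the timer */
	on_mr_destroy(&e);  /* no-op          */
	on_cleanup_done(&e);
	on_mr_destroy(&e);  /* arms it again  */
	return 0;
}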