/linux-6.12.1/drivers/gpu/drm/i915/gt/ |
D | intel_timeline.c |
   40  struct intel_timeline *tl =                                in __timeline_retire() local
   41      container_of(active, typeof(*tl), active);             in __timeline_retire()
   43  i915_vma_unpin(tl->hwsp_ggtt);                             in __timeline_retire()
   44  intel_timeline_put(tl);                                    in __timeline_retire()
   49  struct intel_timeline *tl =                                in __timeline_active() local
   50      container_of(active, typeof(*tl), active);             in __timeline_active()
   52  __i915_vma_pin(tl->hwsp_ggtt);                             in __timeline_active()
   53  intel_timeline_get(tl);                                    in __timeline_active()
  171  struct intel_timeline *tl;                                 in intel_timeline_create_from_engine() local
  173  tl = __intel_timeline_create(engine->gt, hwsp, offset);    in intel_timeline_create_from_engine()
  [all …]
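
The __timeline_retire()/__timeline_active() callbacks above receive only the embedded i915_active member and climb back to the enclosing intel_timeline with container_of(). A minimal self-contained sketch of that pattern; the struct names are illustrative stand-ins, not the real i915 types, and the macro is simplified (the kernel's version adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the kernel macro: recover the address of the
     * enclosing structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct active {                /* stand-in for struct i915_active */
            int refcount;
    };

    struct timeline {              /* stand-in for struct intel_timeline */
            unsigned long hwsp_offset;
            struct active active;  /* embedded member, as in intel_timeline */
    };

    /* Callback that only receives the embedded member, as __timeline_retire()
     * does, and climbs back to the containing timeline. */
    static void timeline_retire(struct active *act)
    {
            struct timeline *tl = container_of(act, struct timeline, active);

            printf("retiring timeline with hwsp_offset=%lu\n", tl->hwsp_offset);
    }

    int main(void)
    {
            struct timeline tl = { .hwsp_offset = 64 };

            timeline_retire(&tl.active);  /* prints hwsp_offset=64 */
            return 0;
    }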
|
D | selftest_timeline.c |
   27  static struct page *hwsp_page(struct intel_timeline *tl)                     in hwsp_page() argument
   29  struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;                        in hwsp_page()
   35  static unsigned long hwsp_cacheline(struct intel_timeline *tl)               in hwsp_cacheline() argument
   37  unsigned long address = (unsigned long)page_address(hwsp_page(tl));          in hwsp_cacheline()
   39  return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;   in hwsp_cacheline()
   42  static int selftest_tl_pin(struct intel_timeline *tl)                        in selftest_tl_pin() argument
   49  err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);                         in selftest_tl_pin()
   51  err = intel_timeline_pin(tl, &ww);                                           in selftest_tl_pin()
   79  struct intel_timeline *tl)                                                   in __mock_hwsp_record() argument
   81  tl = xchg(&state->history[idx], tl);                                         in __mock_hwsp_record()
  [all …]
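
hwsp_cacheline() above turns the HWSP page address plus the in-page offset into a global seqno-slot index by dividing by TIMELINE_SEQNO_BYTES. A hedged userspace sketch of the same arithmetic; PAGE_SIZE and the 8-byte slot size are assumptions for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE            4096u
    #define TIMELINE_SEQNO_BYTES 8u   /* assumed slot size, for illustration */

    /* Mirrors hwsp_cacheline(): combine a page base address with the
     * in-page part of the HWSP offset, then index by seqno slot. */
    static unsigned long seqno_slot(uintptr_t page_base, unsigned int hwsp_offset)
    {
            unsigned int offset_in_page = hwsp_offset & (PAGE_SIZE - 1);

            return (page_base + offset_in_page) / TIMELINE_SEQNO_BYTES;
    }

    int main(void)
    {
            /* Two offsets inside the same 8-byte slot map to the same index. */
            printf("%lu %lu\n", seqno_slot(0x10000, 64), seqno_slot(0x10000, 71));
            return 0;
    }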
|
D | intel_gt_requests.c |
   17  static bool retire_requests(struct intel_timeline *tl)         in retire_requests() argument
   21  list_for_each_entry_safe(rq, rn, &tl->requests, link)          in retire_requests()
   26  return !i915_active_fence_isset(&tl->last_request);            in retire_requests()
   64  struct intel_timeline *tl = xchg(&engine->retire, NULL);       in engine_retire() local
   67  struct intel_timeline *next = xchg(&tl->retire, NULL);         in engine_retire()
   77  if (mutex_trylock(&tl->mutex)) {                               in engine_retire()
   78      retire_requests(tl);                                       in engine_retire()
   79      mutex_unlock(&tl->mutex);                                  in engine_retire()
   81  intel_timeline_put(tl);                                        in engine_retire()
   84  tl = ptr_mask_bits(next, 1);                                   in engine_retire()
  [all …]
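
engine_retire() above claims the whole pending-retirement chain atomically with xchg(&engine->retire, NULL) and then walks the per-timeline retire links. A sketch of that "steal the list with one atomic exchange" idiom using C11 atomics; this is a simplified single-linked chain without the kernel's ptr_mask_bits() pointer tagging:

    #include <stdatomic.h>
    #include <stdio.h>

    struct timeline {
            int id;
            struct timeline *retire;   /* next pending timeline */
    };

    /* Lock-free list head; producers push with a CAS loop, the worker
     * steals the whole chain at once, as engine_retire() does. */
    static _Atomic(struct timeline *) retire_head;

    static void add_retire(struct timeline *tl)
    {
            struct timeline *first = atomic_load(&retire_head);

            do {
                    tl->retire = first;   /* link before publishing */
            } while (!atomic_compare_exchange_weak(&retire_head, &first, tl));
    }

    static void engine_retire_worker(void)
    {
            /* take ownership of everything queued so far in one step */
            struct timeline *tl = atomic_exchange(&retire_head, NULL);

            while (tl) {
                    struct timeline *next = tl->retire;

                    printf("retiring timeline %d\n", tl->id);
                    tl = next;
            }
    }

    int main(void)
    {
            struct timeline a = { .id = 1 }, b = { .id = 2 };

            add_retire(&a);
            add_retire(&b);
            engine_retire_worker();   /* prints 2 then 1 (LIFO order) */
            return 0;
    }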
|
D | intel_timeline.h |
   45  static inline int __intel_timeline_sync_set(struct intel_timeline *tl,         in __intel_timeline_sync_set() argument
   48  return i915_syncmap_set(&tl->sync, context, seqno);                            in __intel_timeline_sync_set()
   51  static inline int intel_timeline_sync_set(struct intel_timeline *tl,           in intel_timeline_sync_set() argument
   54  return __intel_timeline_sync_set(tl, fence->context, fence->seqno);            in intel_timeline_sync_set()
   57  static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,   in __intel_timeline_sync_is_later() argument
   60  return i915_syncmap_is_later(&tl->sync, context, seqno);                       in __intel_timeline_sync_is_later()
   63  static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,     in intel_timeline_sync_is_later() argument
   66  return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);       in intel_timeline_sync_is_later()
   69  void __intel_timeline_pin(struct intel_timeline *tl);
   70  int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww);
  [all …]
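
The sync_set()/sync_is_later() helpers above record the last seqno seen per fence context and answer "have we already synchronized past this point?". A minimal sketch of that semantics with a toy fixed-size context-to-seqno table; the real i915_syncmap is a compressed radix tree, but the wraparound-safe seqno comparison is the key idea:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CONTEXTS 8   /* toy capacity; i915_syncmap grows dynamically */

    struct sync_entry { uint64_t context; uint32_t seqno; bool used; };
    static struct sync_entry syncmap[MAX_CONTEXTS];

    /* Signed-difference comparison tolerates u32 seqno wraparound. */
    static bool seqno_later_eq(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    /* Record the latest seqno observed for this fence context. */
    static void sync_set(uint64_t context, uint32_t seqno)
    {
            for (int i = 0; i < MAX_CONTEXTS; i++) {
                    if (syncmap[i].used && syncmap[i].context == context) {
                            if (seqno_later_eq(seqno, syncmap[i].seqno))
                                    syncmap[i].seqno = seqno;
                            return;
                    }
            }
            for (int i = 0; i < MAX_CONTEXTS; i++) {
                    if (!syncmap[i].used) {
                            syncmap[i] = (struct sync_entry){ context, seqno, true };
                            return;
                    }
            }
    }

    /* true if a seqno at least this late was already recorded. */
    static bool sync_is_later(uint64_t context, uint32_t seqno)
    {
            for (int i = 0; i < MAX_CONTEXTS; i++)
                    if (syncmap[i].used && syncmap[i].context == context)
                            return seqno_later_eq(syncmap[i].seqno, seqno);
            return false;
    }

    int main(void)
    {
            sync_set(100, 10);
            printf("%d %d\n", sync_is_later(100, 5), sync_is_later(100, 20)); /* 1 0 */
            return 0;
    }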
|
D | selftest_context.c |
   17  struct intel_timeline *tl = i915_request_timeline(rq);     in request_sync() local
   21  intel_timeline_get(tl);                                    in request_sync()
   35  lockdep_unpin_lock(&tl->mutex, rq->cookie);                in request_sync()
   36  mutex_unlock(&tl->mutex);                                  in request_sync()
   39  intel_timeline_put(tl);                                    in request_sync()
   46  struct intel_timeline *tl = ce->timeline;                  in context_sync() local
   49  mutex_lock(&tl->mutex);                                    in context_sync()
   54  if (list_empty(&tl->requests))                             in context_sync()
   57  rq = list_last_entry(&tl->requests, typeof(*rq), link);    in context_sync()
   68  mutex_unlock(&tl->mutex);                                  in context_sync()
|
D | intel_context.h |
  251  struct intel_timeline *tl = ce->timeline;                                     in intel_context_timeline_lock() local
  255  err = mutex_lock_interruptible_nested(&tl->mutex, 0);                         in intel_context_timeline_lock()
  257  err = mutex_lock_interruptible_nested(&tl->mutex,                             in intel_context_timeline_lock()
  260  err = mutex_lock_interruptible(&tl->mutex);                                   in intel_context_timeline_lock()
  264  return tl;                                                                    in intel_context_timeline_lock()
  267  static inline void intel_context_timeline_unlock(struct intel_timeline *tl)   in intel_context_timeline_unlock() argument
  268  __releases(&tl->mutex)                                                        in intel_context_timeline_unlock()
  270  mutex_unlock(&tl->mutex);                                                     in intel_context_timeline_unlock()
|
D | mock_engine.c |
   16  static int mock_timeline_pin(struct intel_timeline *tl)               in mock_timeline_pin() argument
   20  if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))      in mock_timeline_pin()
   23  err = intel_timeline_pin_map(tl);                                     in mock_timeline_pin()
   24  i915_gem_object_unlock(tl->hwsp_ggtt->obj);                           in mock_timeline_pin()
   28  atomic_inc(&tl->pin_count);                                           in mock_timeline_pin()
   32  static void mock_timeline_unpin(struct intel_timeline *tl)            in mock_timeline_unpin() argument
   34  GEM_BUG_ON(!atomic_read(&tl->pin_count));                             in mock_timeline_unpin()
   35  atomic_dec(&tl->pin_count);                                           in mock_timeline_unpin()
|
/linux-6.12.1/fs/smb/client/ |
D | dfs_cache.h |
   55  dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,                 in dfs_cache_get_next_tgt() argument
   58  if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list) ||             in dfs_cache_get_next_tgt()
   59      !it || list_is_last(&it->it_list, &tl->tl_list))                  in dfs_cache_get_next_tgt()
   65  dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)             in dfs_cache_get_tgt_iterator() argument
   67  if (!tl)                                                              in dfs_cache_get_tgt_iterator()
   69  return list_first_entry_or_null(&tl->tl_list,                         in dfs_cache_get_tgt_iterator()
   74  static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl) in dfs_cache_free_tgts() argument
   78  if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list))               in dfs_cache_free_tgts()
   80  list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {            in dfs_cache_free_tgts()
   85  tl->tl_numtgts = 0;                                                   in dfs_cache_free_tgts()
  [all …]
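
These inline helpers implement a bounds-checked forward iterator over the DFS target list: get_tgt_iterator() returns the first entry or NULL, and get_next_tgt() returns NULL once the current iterator is the last entry. A sketch of the same iterator contract over a plain singly-linked list; the types here are illustrative stand-ins for the real list_head-based ones:

    #include <stdio.h>

    /* Stand-ins for dfs_cache_tgt_list / dfs_cache_tgt_iterator. */
    struct tgt_iterator {
            const char *name;
            struct tgt_iterator *next;
    };

    struct tgt_list {
            int numtgts;
            struct tgt_iterator *first;
    };

    /* First target, or NULL for an absent/empty list — mirrors
     * dfs_cache_get_tgt_iterator(). */
    static struct tgt_iterator *get_tgt_iterator(struct tgt_list *tl)
    {
            return tl ? tl->first : NULL;
    }

    /* Next target, or NULL when 'it' is the last one — mirrors the
     * list_is_last() check in dfs_cache_get_next_tgt(). */
    static struct tgt_iterator *get_next_tgt(struct tgt_list *tl,
                                             struct tgt_iterator *it)
    {
            if (!tl || !tl->numtgts || !it)
                    return NULL;
            return it->next;
    }

    int main(void)
    {
            struct tgt_iterator b = { "target-b", NULL };
            struct tgt_iterator a = { "target-a", &b };
            struct tgt_list tl = { 2, &a };

            for (struct tgt_iterator *it = get_tgt_iterator(&tl); it;
                 it = get_next_tgt(&tl, it))
                    printf("%s\n", it->name);
            return 0;
    }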
|
D | dfs.h |
   23  struct dfs_cache_tgt_list tl;                                  member
   41  #define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
   64  dfs_cache_free_tgts(&ref->tl);                                 in __ref_walk_free()
  100  tit = dfs_cache_get_tgt_iterator(&ref->tl);                    in ref_walk_next_tgt()
  102  tit = dfs_cache_get_next_tgt(&ref->tl, ref->tit);              in ref_walk_next_tgt()
  149  struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)    in dfs_get_referral() argument
  156  cifs_remap(cifs_sb), path, ref, tl);                           in dfs_get_referral()
|
D | dfs.c |
  441  struct dfs_cache_tgt_list *tl)                                         in __tree_connect_dfs_target() argument
  451  tit = dfs_cache_get_tgt_iterator(tl);                                  in __tree_connect_dfs_target()
  458  for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {                   in __tree_connect_dfs_target()
  506  dfs_cache_free_tgts(tl);                                               in __tree_connect_dfs_target()
  510  list_replace_init(&ntl.tl_list, &tl->tl_list);                         in __tree_connect_dfs_target()
  526  struct dfs_cache_tgt_list *tl)                                         in tree_connect_dfs_target() argument
  534  rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);  in tree_connect_dfs_target()
  545  dfs_cache_free_tgts(tl);                                               in tree_connect_dfs_target()
  554  DFS_CACHE_TGT_LIST(tl);                                                in cifs_tree_connect()
  604  dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {          in cifs_tree_connect()
  [all …]
|
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_debugdump.c |
   63  struct nfp_dump_tl_hdr tl;                                       member
   69  struct nfp_dump_tl_hdr tl;                                       member
   75  struct nfp_dump_tl_hdr tl;                                       member
   83  struct nfp_dump_tl_hdr tl;                                       member
   92  struct nfp_dump_tl_hdr tl;                                       member
   97  struct nfp_dump_tl_hdr tl;                                       member
  117  typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
  125  struct nfp_dump_tl *tl;                                          in nfp_traverse_tlvs() local
  130  while (remaining >= sizeof(*tl)) {                               in nfp_traverse_tlvs()
  131  tl = p;                                                          in nfp_traverse_tlvs()
  [all …]
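
nfp_traverse_tlvs() above is a classic type-length-value walk: keep consuming records while the remaining bytes can still hold a header, and bail out if a declared length overruns the buffer. A self-contained sketch of that loop shape; the header layout here is an assumption for illustration (the real struct nfp_dump_tl_hdr differs), and a little-endian host is assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed TLV header layout, illustrative only. */
    struct tl_hdr {
            uint16_t type;
            uint16_t length;   /* bytes of value following the header */
    };

    /* Walk a packed TLV buffer the way nfp_traverse_tlvs() does: stop when
     * the remainder cannot hold another header or the declared value. */
    static int traverse_tlvs(const uint8_t *p, size_t remaining)
    {
            while (remaining >= sizeof(struct tl_hdr)) {
                    struct tl_hdr tl;

                    memcpy(&tl, p, sizeof(tl));   /* avoid unaligned access */
                    if (remaining < sizeof(tl) + tl.length)
                            return -1;            /* truncated/corrupt TLV */
                    printf("type=%u len=%u\n",
                           (unsigned)tl.type, (unsigned)tl.length);
                    p += sizeof(tl) + tl.length;
                    remaining -= sizeof(tl) + tl.length;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb,   /* type 1, len 2 */
                              2, 0, 0, 0 };             /* type 2, len 0 */
            return traverse_tlvs(buf, sizeof(buf));
    }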
|
/linux-6.12.1/crypto/ |
D | vmac.c |
  151  int i; u64 th, tl; \
  154  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  156  ADD128(rh, rl, th, tl); \
  162  int i; u64 th, tl; \
  165  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  167  ADD128(rh, rl, th, tl); \
  168  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
  170  ADD128(rh1, rl1, th, tl); \
  177  int i; u64 th, tl; \
  180  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  [all …]
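
Here `tl`/`th` are the low and high 64-bit halves of a 128-bit product: MUL64 produces a full 64x64→128 multiply and ADD128 accumulates it with carry propagation. A sketch of equivalent macros using GCC/Clang's unsigned __int128 (the kernel's versions are written without a 128-bit type so they can target compilers and arches that lack one):

    #include <stdint.h>
    #include <stdio.h>

    /* 64x64 -> 128 multiply, split into high/low 64-bit words. */
    #define MUL64(th, tl, a, b) do { \
            unsigned __int128 p = (unsigned __int128)(a) * (b); \
            (th) = (uint64_t)(p >> 64); \
            (tl) = (uint64_t)p; \
    } while (0)

    /* 128-bit accumulate: (rh:rl) += (th:tl), with carry out of the low word. */
    #define ADD128(rh, rl, th, tl) do { \
            uint64_t lo = (rl) + (tl); \
            (rh) += (th) + (lo < (rl)); \
            (rl) = lo; \
    } while (0)

    int main(void)
    {
            uint64_t rh = 0, rl = 0, th, tl;

            MUL64(th, tl, ~0ULL, 2ULL);   /* 2 * (2^64 - 1) = 2^65 - 2 */
            ADD128(rh, rl, th, tl);
            printf("%llx:%llx\n",         /* prints 1:fffffffffffffffe */
                   (unsigned long long)rh, (unsigned long long)rl);
            return 0;
    }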
|
/linux-6.12.1/drivers/isdn/mISDN/ |
D | fsm.c |
   98  struct FsmTimer *ft = from_timer(ft, t, tl);               in FsmExpireTimer()
  114  timer_setup(&ft->tl, FsmExpireTimer, 0);                   in mISDN_FsmInitTimer()
  126  del_timer(&ft->tl);                                        in mISDN_FsmDelTimer()
  141  if (timer_pending(&ft->tl)) {                              in mISDN_FsmAddTimer()
  152  ft->tl.expires = jiffies + (millisec * HZ) / 1000;         in mISDN_FsmAddTimer()
  153  add_timer(&ft->tl);                                        in mISDN_FsmAddTimer()
  169  if (timer_pending(&ft->tl))                                in mISDN_FsmRestartTimer()
  170  del_timer(&ft->tl);                                        in mISDN_FsmRestartTimer()
  173  ft->tl.expires = jiffies + (millisec * HZ) / 1000;         in mISDN_FsmRestartTimer()
  174  add_timer(&ft->tl);                                        in mISDN_FsmRestartTimer()
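
The timer helpers above arm a timer_list at `jiffies + (millisec * HZ) / 1000`, the open-coded millisecond-to-tick conversion. A small sketch of that arithmetic and its pitfall; the HZ value is illustrative, and the kernel's msecs_to_jiffies() is the preferred helper because it also handles rounding and clamping:

    #include <stdio.h>

    #define HZ 250UL   /* illustrative tick rate; real kernels use 100-1000 */

    /* Open-coded conversion as in mISDN_FsmAddTimer(). The multiply can
     * overflow for very large millisecond values, one reason the kernel
     * provides msecs_to_jiffies() instead. */
    static unsigned long ms_to_jiffies_naive(unsigned long ms)
    {
            return (ms * HZ) / 1000;
    }

    int main(void)
    {
            unsigned long jiffies = 100000;   /* pretend current tick count */

            /* 40 ms at HZ=250 is 10 ticks -> expires at 100010 */
            printf("expires at %lu\n", jiffies + ms_to_jiffies_naive(40));
            return 0;
    }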
|
D | timerdev.c |
   39  struct timer_list tl;                                              member
   77  timer_shutdown_sync(&timer->tl);                                   in mISDN_close()
  158  struct mISDNtimer *timer = from_timer(timer, t, tl);               in dev_expire_timer()
  183  timer_setup(&timer->tl, dev_expire_timer, 0);                      in misdn_add_timer()
  189  timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);     in misdn_add_timer()
  190  add_timer(&timer->tl);                                             in misdn_add_timer()
  207  timer_shutdown_sync(&timer->tl);                                   in misdn_del_timer()
|
/linux-6.12.1/drivers/s390/net/ |
D | fsm.c |
  135  fsm_timer *this = from_timer(this, t, tl);                 in fsm_expire_timer()
  151  timer_setup(&this->tl, fsm_expire_timer, 0);               in fsm_settimer()
  161  del_timer(&this->tl);                                      in fsm_deltimer()
  173  timer_setup(&this->tl, fsm_expire_timer, 0);               in fsm_addtimer()
  176  this->tl.expires = jiffies + (millisec * HZ) / 1000;       in fsm_addtimer()
  177  add_timer(&this->tl);                                      in fsm_addtimer()
  191  del_timer(&this->tl);                                      in fsm_modtimer()
  192  timer_setup(&this->tl, fsm_expire_timer, 0);               in fsm_modtimer()
  195  this->tl.expires = jiffies + (millisec * HZ) / 1000;       in fsm_modtimer()
  196  add_timer(&this->tl);                                      in fsm_modtimer()
|
/linux-6.12.1/sound/soc/intel/boards/ |
D | sof_cirrus_common.c |
   22  SOC_DAPM_PIN_SWITCH("TL Spk"),
   29  SND_SOC_DAPM_SPK("TL Spk", NULL),
   37  {"TL Spk", NULL, "TL SPK"},
   79  * TL/WL: ASPRX1 on slot 0, ASPRX2 on slot 1 (default)
   87  {.rx = {0, 1}}, /* TL */
  143  static const char * const cs35l41_name_prefixes[] = { "WL", "WR", "TL", "TR" };
  150  * UID 0x2 -> TL
  152  * Note: If there are less than 4 Amps, UIDs still map to WL/WR/TL/TR. Dynamic code will only create
|
/linux-6.12.1/fs/ext4/ |
D | fast_commit.c |
  707  struct ext4_fc_tl tl;                                              in ext4_fc_reserve_space() local
  745  tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);                          in ext4_fc_reserve_space()
  746  tl.fc_len = cpu_to_le16(remaining);                                in ext4_fc_reserve_space()
  747  memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);                            in ext4_fc_reserve_space()
  772  struct ext4_fc_tl tl;                                              in ext4_fc_write_tail() local
  787  tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);                         in ext4_fc_write_tail()
  788  tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail)); in ext4_fc_write_tail()
  791  memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);                            in ext4_fc_write_tail()
  815  struct ext4_fc_tl tl;                                              in ext4_fc_add_tlv() local
  822  tl.fc_tag = cpu_to_le16(tag);                                      in ext4_fc_add_tlv()
  [all …]
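
The ext4 fast-commit writers above are the encoding side of a TLV scheme: fill a tag+length header with little-endian fields, memcpy the header into the block buffer, then append the value. A userspace sketch of that pattern; the struct mirrors ext4_fc_tl's two u16 fields, but the tag value and buffer handling here are illustrative, not the on-disk format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Two little-endian u16 fields, as in struct ext4_fc_tl. */
    struct fc_tl {
            uint16_t fc_tag;
            uint16_t fc_len;
    };

    static uint16_t cpu_to_le16(uint16_t v)
    {
            /* identity on little-endian hosts, byte-swap on big-endian */
            const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

            return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
    }

    /* Emit one tag+length header followed by its value, as
     * ext4_fc_add_tlv() does into the fast-commit block. */
    static uint8_t *fc_add_tlv(uint8_t *dst, uint16_t tag,
                               const void *val, uint16_t len)
    {
            struct fc_tl tl = {
                    .fc_tag = cpu_to_le16(tag),
                    .fc_len = cpu_to_le16(len),
            };

            memcpy(dst, &tl, sizeof(tl));        /* header first... */
            memcpy(dst + sizeof(tl), val, len);  /* ...then the payload */
            return dst + sizeof(tl) + len;
    }

    int main(void)
    {
            uint8_t blk[64], *end;
            uint32_t ino = 42;

            end = fc_add_tlv(blk, 7 /* illustrative tag */, &ino, sizeof(ino));
            printf("wrote %zu bytes\n", (size_t)(end - blk));
            return 0;
    }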
|
/linux-6.12.1/arch/sparc/kernel/ |
D | etrap_64.S |
  208  etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
  217   * 0x60 TL
  221  rdpr %tl, %g1
  223  wrpr %g0, 1, %tl
  233  wrpr %g0, 2, %tl
  248  wrpr %g0, 3, %tl
  258  wrpr %g0, 4, %tl
  271  wrpr %g0, 1, %tl
|
D | dtlb_prot.S |
   12  * [TL == 0] 1) User stores to readonly pages.
   13  * [TL == 0] 2) Nucleus stores to user readonly pages.
   14  * [TL > 0]  3) Nucleus stores to user readonly stack frame.
   23  rdpr %tl, %g1 ! Need a winfixup?
|
D | cherrs.S |
  174  /* If we take one of these traps when tl >= 1, then we
  182  rdpr %tl, %g1  ! Save original trap level
  185  1: wrpr %g2, %tl  ! Set trap level to check
  189  wrpr %g1, %tl  ! Restore original trap level
  194  wrpr %g1, %tl  ! Restore original trap level
  233  rdpr %tl, %g1  ! Save original trap level
  236  1: wrpr %g2, %tl  ! Set trap level to check
  240  wrpr %g1, %tl  ! Restore original trap level
  245  wrpr %g1, %tl  ! Restore original trap level
  295  * %g1: (TL>=0) ? 1 : 0
  [all …]
|
/linux-6.12.1/kernel/sched/ |
D | topology.c |
  1588  sd_init(struct sched_domain_topology_level *tl,                               in sd_init() argument
  1592  struct sd_data *sdd = &tl->data;                                              in sd_init()
  1601  sched_domains_curr_level = tl->numa_level;                                    in sd_init()
  1604  sd_weight = cpumask_weight(tl->mask(cpu));                                    in sd_init()
  1606  if (tl->sd_flags)                                                             in sd_init()
  1607  sd_flags = (*tl->sd_flags)();                                                 in sd_init()
  1639  .name = tl->name,                                                             in sd_init()
  1644  cpumask_and(sd_span, cpu_map, tl->mask(cpu));                                 in sd_init()
  1673  if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {    in sd_init()
  1722  #define for_each_sd_topology(tl) \                                            argument
  [all …]
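
Here sd_init() builds one scheduling domain per topology level, and for_each_sd_topology() iterates a table of levels terminated by a sentinel entry. A sketch of that table-plus-iterator-macro shape; the struct fields, level names, and flags below are simplified stand-ins, not the real sched_domain_topology_level:

    #include <stdio.h>

    /* Simplified stand-in: a table of levels ended by a NULL-name entry,
     * walked with a for_each-style macro like for_each_sd_topology(). */
    struct topology_level {
            const char *name;
            int (*sd_flags)(void);
    };

    #define for_each_topology_level(tl, table) \
            for ((tl) = (table); (tl)->name; (tl)++)

    static int smt_flags(void) { return 0x1; }   /* illustrative flag set */

    static const struct topology_level default_topology[] = {
            { .name = "SMT", .sd_flags = smt_flags },
            { .name = "MC"  },
            { .name = "PKG" },
            { /* terminator */ }
    };

    int main(void)
    {
            const struct topology_level *tl;

            for_each_topology_level(tl, default_topology)
                    printf("%s flags=%#x\n", tl->name,
                           tl->sd_flags ? tl->sd_flags() : 0);
            return 0;
    }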
|
/linux-6.12.1/drivers/gpu/drm/i915/ |
D | i915_request.c |
  420  struct intel_timeline * const tl = i915_request_timeline(rq);   in i915_request_retire_upto() local
  427  tmp = list_first_entry(&tl->requests, typeof(*tmp), link);      in i915_request_retire_upto()
  824  static void retire_requests(struct intel_timeline *tl)          in retire_requests() argument
  828  list_for_each_entry_safe(rq, rn, &tl->requests, link)           in retire_requests()
  834  request_alloc_slow(struct intel_timeline *tl,                   in request_alloc_slow() argument
  849  if (list_empty(&tl->requests))                                  in request_alloc_slow()
  853  rq = list_first_entry(&tl->requests, typeof(*rq), link);        in request_alloc_slow()
  862  rq = list_last_entry(&tl->requests, typeof(*rq), link);         in request_alloc_slow()
  866  retire_requests(tl);                                            in request_alloc_slow()
  896  struct intel_timeline *tl = ce->timeline;                       in __i915_request_create()
  [all …]
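
retire_requests() above walks tl->requests with list_for_each_entry_safe(), the "safe" variant that fetches the next link before the current entry is unlinked and freed. A minimal sketch of why that matters, using a simplified singly-linked list instead of the kernel's doubly-linked list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
            unsigned int seqno;
            struct request *next;
    };

    /* "Safe" iteration as in list_for_each_entry_safe(): save rq->next
     * before freeing rq, so the walk survives the removal. */
    static void retire_all(struct request **head)
    {
            struct request *rq = *head, *rn;

            while (rq) {
                    rn = rq->next;    /* grab next before rq is gone */
                    printf("retiring request %u\n", rq->seqno);
                    free(rq);
                    rq = rn;
            }
            *head = NULL;
    }

    static void submit(struct request **head, unsigned int seqno)
    {
            struct request *rq = malloc(sizeof(*rq));

            if (!rq)
                    return;
            rq->seqno = seqno;
            rq->next = *head;
            *head = rq;
    }

    int main(void)
    {
            struct request *head = NULL;

            submit(&head, 1);
            submit(&head, 2);
            retire_all(&head);
            return 0;
    }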
|
/linux-6.12.1/drivers/net/ethernet/qlogic/qed/ |
D | qed_vf.h |
   55  struct channel_tlv tl;   member
   62  struct channel_tlv tl;   member
   74  struct channel_tlv tl;   member
  123  struct channel_tlv tl;   member
  232  struct channel_tlv tl;   member
  345  struct channel_tlv tl;   member
  353  struct channel_tlv tl;   member
  359  struct channel_tlv tl;   member
  365  struct channel_tlv tl;   member
  377  struct channel_tlv tl;   member
  [all …]
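
Every one of these qed VF-to-PF mailbox messages begins with the same channel_tlv header as its first member, so any message can be classified by reading just that header. A sketch of that "common header as first member" pattern; the field names and message type are illustrative, not the real qed definitions:

    #include <stdio.h>

    /* Illustrative common header; the real struct channel_tlv differs. */
    struct channel_tlv {
            unsigned short type;
            unsigned short length;
    };

    struct vfpf_acquire_msg {
            struct channel_tlv tl;   /* must stay the first member */
            unsigned int vf_id;
    };

    static void dispatch(const void *msg)
    {
            /* Valid because the header is the first member of every
             * message struct: a pointer to the message is also a pointer
             * to its header. */
            const struct channel_tlv *tl = msg;

            printf("message type %u, %u bytes\n",
                   (unsigned)tl->type, (unsigned)tl->length);
    }

    int main(void)
    {
            struct vfpf_acquire_msg m = {
                    .tl = { .type = 1, .length = sizeof(m) },
                    .vf_id = 3,
            };

            dispatch(&m);
            return 0;
    }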
|
/linux-6.12.1/arch/s390/include/asm/ |
D | dat-bits.h |
   24  unsigned long tl : 2; /* Region- or Segment-Table Length */   member
   46  unsigned long tl : 2; /* Region-Second-Table Length */        member
   61  unsigned long tl : 2; /* Region-Third-Table Length */         member
   75  unsigned long tl : 2; /* Segment-Table Length */              member
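
These 2-bit `tl` bitfields encode the length of the next-lower translation table, which hardware interprets as (tl + 1) units. A tiny sketch of decoding such a field; the 512-entries-per-unit figure is an assumption for illustration, so consult the s390 Principles of Operation for the authoritative encoding:

    #include <stdio.h>

    /* Single 2-bit table-length field, as in asm/dat-bits.h. */
    struct rste {
            unsigned long tl : 2;
    };

    int main(void)
    {
            struct rste e = { .tl = 3 };

            /* assumed unit size of 512 entries, for illustration */
            printf("table spans %lu entries\n", (e.tl + 1) * 512UL);
            return 0;
    }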
|
/linux-6.12.1/drivers/nvme/target/ |
D | fabrics-cmd-auth.c |
  187  u32 tl;                                                        in nvmet_execute_auth_send() local
  209  tl = le32_to_cpu(req->cmd->auth_send.tl);                      in nvmet_execute_auth_send()
  210  if (!tl) {                                                     in nvmet_execute_auth_send()
  213  offsetof(struct nvmf_auth_send_command, tl);                   in nvmet_execute_auth_send()
  216  if (!nvmet_check_transfer_len(req, tl)) {                      in nvmet_execute_auth_send()
  217  pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl); in nvmet_execute_auth_send()
  221  d = kmalloc(tl, GFP_KERNEL);                                   in nvmet_execute_auth_send()
  227  status = nvmet_copy_from_sgl(req, 0, d, tl);                   in nvmet_execute_auth_send()
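
The handler above reads a 32-bit transfer length (`tl`) from the command, rejects zero or mismatched lengths, and only then allocates exactly that many bytes before copying the payload. A userspace sketch of the same validate-before-allocate flow; names are illustrative, and a little-endian host is assumed for the length decode:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Decode the declared transfer length, check it against what actually
     * arrived, then allocate and copy — mirroring the order of operations
     * in nvmet_execute_auth_send(). */
    static void *read_auth_payload(const uint8_t *cmd_tl_le,
                                   const uint8_t *payload, size_t payload_len)
    {
            uint32_t tl;
            void *d;

            memcpy(&tl, cmd_tl_le, sizeof(tl));  /* assumes little-endian */
            if (!tl || tl != payload_len)        /* reject before allocating */
                    return NULL;

            d = malloc(tl);
            if (!d)
                    return NULL;
            memcpy(d, payload, tl);
            return d;
    }

    int main(void)
    {
            uint8_t payload[16] = { 0 };
            uint8_t tl_le[4] = { 16, 0, 0, 0 };  /* declared tl = 16 */
            void *d = read_auth_payload(tl_le, payload, sizeof(payload));

            printf("%s\n", d ? "accepted" : "rejected");
            free(d);
            return 0;
    }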
|