/linux-6.12.1/drivers/clk/renesas/
  r9a06g032-clocks.c
     47  * Helper macro RB() takes care of converting the register
     55  #define RB(_reg, _bit) ((struct regbit) { \   macro
    286  D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, RB(0xe8, 9),
    287          RB(0xe8, 10), RB(0xe8, 11), RB(0x00, 0),
    288          RB(0x15c, 3), RB(0x00, 0), RB(0x00, 0)),
    289  D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, RB(0xe8, 12),
    290          RB(0xe8, 13), RB(0xe8, 14), RB(0x00, 0),
    291          RB(0x15c, 4), RB(0x00, 0), RB(0x00, 0)),
    292  D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, RB(0xe8, 15),
    293          RB(0xe8, 16), RB(0xe8, 17), RB(0x00, 0),
         [all …]

/linux-6.12.1/kernel/events/
  ring_buffer.c
     22  atomic_set(&handle->rb->poll, EPOLLIN);   in perf_output_wakeup()
     42  struct perf_buffer *rb = handle->rb;   in perf_output_get_handle() local
     50  (*(volatile unsigned int *)&rb->nest)++;   in perf_output_get_handle()
     51  handle->wakeup = local_read(&rb->wakeup);   in perf_output_get_handle()
     56  struct perf_buffer *rb = handle->rb;   in perf_output_put_handle() local
     62  * @rb->user_page->data_head.   in perf_output_put_handle()
     64  nest = READ_ONCE(rb->nest);   in perf_output_put_handle()
     66  WRITE_ONCE(rb->nest, nest - 1);   in perf_output_put_handle()
     73  * we must ensure the load of @rb->head happens after we've   in perf_output_put_handle()
     74  * incremented @rb->nest.   in perf_output_put_handle()
         [all …]

  internal.h
     62  extern void rb_free(struct perf_buffer *rb);
     66  struct perf_buffer *rb;   in rb_free_rcu() local
     68  rb = container_of(rcu_head, struct perf_buffer, rcu_head);   in rb_free_rcu()
     69  rb_free(rb);   in rb_free_rcu()
     72  static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)   in rb_toggle_paused() argument
     74  if (!pause && rb->nr_pages)   in rb_toggle_paused()
     75  rb->paused = 0;   in rb_toggle_paused()
     77  rb->paused = 1;   in rb_toggle_paused()
     83  extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
     85  extern void rb_free_aux(struct perf_buffer *rb);
         [all …]

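The comments quoted from perf_output_put_handle() concern ordering stores to rb->user_page->data_head against the consumer. For context, a hedged sketch of the matching userspace consumer loop; the mmap page layout is the documented perf ABI from <linux/perf_event.h>, while the helper name and the shortcut of not reassembling records that wrap past the end of the buffer are illustrative, not taken from this file:

#include <linux/perf_event.h>
#include <stdint.h>

/* Sketch: drain whatever the kernel has published since the last read.
 * 'base' is the mmap'd struct perf_event_mmap_page, 'data' the data area
 * that follows it, 'data_size' its power-of-two size. */
static void drain_ring(struct perf_event_mmap_page *base, char *data,
		       uint64_t data_size)
{
	/* Acquire pairs with the kernel's publishing barrier on data_head. */
	uint64_t head = __atomic_load_n(&base->data_head, __ATOMIC_ACQUIRE);
	uint64_t tail = base->data_tail;

	while (tail < head) {
		struct perf_event_header *ev =
			(struct perf_event_header *)(data + (tail & (data_size - 1)));

		/* ... consume ev->type, ev->size bytes of payload here ... */
		tail += ev->size;
	}

	/* Release tells the kernel this space may be overwritten again. */
	__atomic_store_n(&base->data_tail, tail, __ATOMIC_RELEASE);
}
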
/linux-6.12.1/drivers/net/ethernet/brocade/bna/
  bfa_ioc_ct.c
     49  static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
     51  static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
    251  void __iomem *rb;   in bfa_ioc_ct_reg_init() local
    254  rb = bfa_ioc_bar0(ioc);   in bfa_ioc_ct_reg_init()
    256  ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;   in bfa_ioc_ct_reg_init()
    257  ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;   in bfa_ioc_ct_reg_init()
    258  ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;   in bfa_ioc_ct_reg_init()
    261  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;   in bfa_ioc_ct_reg_init()
    262  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;   in bfa_ioc_ct_reg_init()
    263  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;   in bfa_ioc_ct_reg_init()
         [all …]

/linux-6.12.1/drivers/scsi/bfa/
  bfa_ioc_ct.c
    185  void __iomem *rb;   in bfa_ioc_ct_reg_init() local
    188  rb = bfa_ioc_bar0(ioc);   in bfa_ioc_ct_reg_init()
    190  ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;   in bfa_ioc_ct_reg_init()
    191  ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;   in bfa_ioc_ct_reg_init()
    192  ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;   in bfa_ioc_ct_reg_init()
    195  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;   in bfa_ioc_ct_reg_init()
    196  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;   in bfa_ioc_ct_reg_init()
    197  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;   in bfa_ioc_ct_reg_init()
    198  ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;   in bfa_ioc_ct_reg_init()
    199  ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;   in bfa_ioc_ct_reg_init()
         [all …]

  bfa_ioc_cb.c
    138  void __iomem *rb;   in bfa_ioc_cb_reg_init() local
    141  rb = bfa_ioc_bar0(ioc);   in bfa_ioc_cb_reg_init()
    143  ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;   in bfa_ioc_cb_reg_init()
    144  ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;   in bfa_ioc_cb_reg_init()
    145  ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;   in bfa_ioc_cb_reg_init()
    148  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;   in bfa_ioc_cb_reg_init()
    149  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;   in bfa_ioc_cb_reg_init()
    150  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;   in bfa_ioc_cb_reg_init()
    152  ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);   in bfa_ioc_cb_reg_init()
    153  ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);   in bfa_ioc_cb_reg_init()
         [all …]

/linux-6.12.1/tools/lib/bpf/
  ringbuf.c
     60  static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)   in ringbuf_free_ring() argument
     63  munmap(r->consumer_pos, rb->page_size);
     67  munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
     75  int ring_buffer__add(struct ring_buffer *rb, int map_fd,   in ring_buffer__add() argument
    102  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));   in ring_buffer__add()
    105  rb->rings = tmp;   in ring_buffer__add()
    107  tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));   in ring_buffer__add()
    110  rb->events = tmp;   in ring_buffer__add()
    115  rb->rings[rb->ring_cnt] = r;   in ring_buffer__add()
    123  tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);   in ring_buffer__add()
         [all …]

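ring_buffer__add() above is the per-map registration step behind libbpf's ring buffer consumer API. A minimal consumer sketch using that API; the pin path and the assumption that a BPF_MAP_TYPE_RINGBUF map is already pinned there are illustrative, the libbpf calls themselves are real:

#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <stdio.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* 'data' points at one record reserved/submitted by the BPF program. */
	printf("got %zu-byte event\n", size);
	return 0;	/* a negative return stops ring_buffer__poll() */
}

int main(void)
{
	/* Hypothetical pin path; any ringbuf map fd works here. */
	int map_fd = bpf_obj_get("/sys/fs/bpf/events");
	if (map_fd < 0)
		return 1;

	struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return 1;

	while (ring_buffer__poll(rb, 100 /* ms */) >= 0)
		;	/* handle_event() runs once per record */

	ring_buffer__free(rb);
	return 0;
}

Additional maps can be attached to the same consumer afterwards with ring_buffer__add(rb, other_fd, cb, ctx), which is the function the matches above come from.
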
/linux-6.12.1/tools/testing/selftests/bpf/benchs/
  run_bench_ringbufs.sh
     10  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     15  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     16  summarize $b "$($RUN_RB_BENCH --rb-sampled $b)"
     20  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     21  summarize $b "$($RUN_RB_BENCH --rb-b2b $b)"
     22  summarize $b-sampled "$($RUN_RB_BENCH --rb-sampled --rb-b2b $b)"
     27  …summarize "rb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rat…
     31  …summarize "pb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rat…
     35  summarize "reserve" "$($RUN_RB_BENCH --rb-b2b rb-custom)"
     36  summarize "output" "$($RUN_RB_BENCH --rb-b2b --rb-use-output rb-custom)"
         [all …]

/linux-6.12.1/arch/x86/crypto/
  serpent-sse2-i586-asm_32.S
     28  #define RB %xmm1   macro
    513  read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
    515  K(RA, RB, RC, RD, RE, 0);
    516  S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1);
    517  S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2);
    518  S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3);
    519  S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4);
    520  S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5);
    521  S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6);
    522  S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7);
         [all …]

  serpent-sse2-x86_64-asm_64.S
    636  K2(RA, RB, RC, RD, RE, 0);
    637  S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
    638  S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2);
    639  S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3);
    640  S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4);
    641  S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5);
    642  S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6);
    643  S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7);
    644  S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8);
    645  S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9);
         [all …]

  serpent-avx-x86_64-asm_64.S
    566  K2(RA, RB, RC, RD, RE, 0);
    567  S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
    568  S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2);
    569  S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3);
    570  S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4);
    571  S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5);
    572  S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6);
    573  S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7);
    574  S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8);
    575  S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9);
         [all …]

  serpent-avx2-asm_64.S
    566  K2(RA, RB, RC, RD, RE, 0);
    567  S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
    568  S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2);
    569  S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3);
    570  S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4);
    571  S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5);
    572  S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6);
    573  S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7);
    574  S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8);
    575  S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9);
         [all …]

/linux-6.12.1/fs/xfs/scrub/
  bmap_repair.c
     97  struct xrep_bmap *rb,   in xrep_bmap_discover_shared() argument
    101  struct xfs_scrub *sc = rb->sc;   in xrep_bmap_discover_shared()
    114  rb->reflink_scan = RLS_SET_IFLAG;   in xrep_bmap_discover_shared()
    122  struct xrep_bmap *rb,   in xrep_bmap_from_rmap() argument
    134  struct xfs_scrub *sc = rb->sc;   in xrep_bmap_from_rmap()
    142  if (rb->reflink_scan == RLS_UNKNOWN && !unwritten) {   in xrep_bmap_from_rmap()
    143  error = xrep_bmap_discover_shared(rb, startblock, blockcount);   in xrep_bmap_from_rmap()
    154  fa = xfs_bmap_validate_extent(sc->ip, rb->whichfork, &irec);   in xrep_bmap_from_rmap()
    160  trace_xrep_bmap_found(sc->ip, rb->whichfork, &irec);   in xrep_bmap_from_rmap()
    165  error = xfarray_append(rb->bmap_records, &rbe);   in xrep_bmap_from_rmap()
         [all …]

/linux-6.12.1/lib/
  rbtree_test.c
     14  __param(int, nnodes, 100, "Number of nodes in the rb-tree");
     15  __param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
     16  __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
     20  struct rb_node rb;   member
     39  if (key < rb_entry(parent, struct test_node, rb)->key)   in insert()
     45  rb_link_node(&node->rb, parent, new);   in insert()
     46  rb_insert_color(&node->rb, &root->rb_root);   in insert()
     57  if (key < rb_entry(parent, struct test_node, rb)->key)   in insert_cached()
     65  rb_link_node(&node->rb, parent, new);   in insert_cached()
     66  rb_insert_color_cached(&node->rb, root, leftmost);   in insert_cached()
         [all …]

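The rb_link_node()/rb_insert_color() pairing in the matches above is the standard kernel rbtree insertion idiom. A minimal sketch of the same pattern for a keyed node; struct mynode and its key field are illustrative, the rbtree calls are the real <linux/rbtree.h> API:

#include <linux/rbtree.h>

struct mynode {
	struct rb_node rb;
	int key;
};

/* Walk down to the insertion point, then link the node and rebalance. */
static void mynode_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *cur = rb_entry(*link, struct mynode, rb);

		parent = *link;
		if (new->key < cur->key)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->rb, parent, link);	/* insert as a red leaf */
	rb_insert_color(&new->rb, root);	/* recolor/rotate to restore balance */
}

The _cached variants matched above (rb_insert_color_cached() with an rb_root_cached) additionally track the leftmost node so the minimum can be read in O(1).
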
/linux-6.12.1/drivers/hid/intel-ish-hid/ishtp/
  client-buffers.c
     23  struct ishtp_cl_rb *rb;   in ishtp_cl_alloc_rx_ring() local
     28  rb = ishtp_io_rb_init(cl);   in ishtp_cl_alloc_rx_ring()
     29  if (!rb) {   in ishtp_cl_alloc_rx_ring()
     33  ret = ishtp_io_rb_alloc_buf(rb, len);   in ishtp_cl_alloc_rx_ring()
     37  list_add_tail(&rb->list, &cl->free_rb_list.list);   in ishtp_cl_alloc_rx_ring()
     99  struct ishtp_cl_rb *rb;   in ishtp_cl_free_rx_ring() local
    105  rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,   in ishtp_cl_free_rx_ring()
    107  list_del(&rb->list);   in ishtp_cl_free_rx_ring()
    108  kfree(rb->buffer.data);   in ishtp_cl_free_rx_ring()
    109  kfree(rb);   in ishtp_cl_free_rx_ring()
         [all …]

/linux-6.12.1/kernel/bpf/
  ringbuf.c
     80  struct bpf_ringbuf *rb;   member
     97  struct bpf_ringbuf *rb;   in bpf_ringbuf_area_alloc() local
    134  rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,   in bpf_ringbuf_area_alloc()
    136  if (rb) {   in bpf_ringbuf_area_alloc()
    138  rb->pages = pages;   in bpf_ringbuf_area_alloc()
    139  rb->nr_pages = nr_pages;   in bpf_ringbuf_area_alloc()
    140  return rb;   in bpf_ringbuf_area_alloc()
    152  struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);   in bpf_ringbuf_notify() local
    154  wake_up_all(&rb->waitq);   in bpf_ringbuf_notify()
    170  struct bpf_ringbuf *rb;   in bpf_ringbuf_alloc() local
         [all …]

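bpf_ringbuf_area_alloc() above backs the BPF_MAP_TYPE_RINGBUF map that BPF programs write into. A minimal producer sketch from the BPF program side; the map name, event layout, and attach point are illustrative, while bpf_ringbuf_reserve()/bpf_ringbuf_submit() are the real helpers implemented in this file:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* power-of-2 multiple of page size */
} events SEC(".maps");

SEC("tracepoint/sched/sched_process_exec")	/* illustrative attach point */
int handle_exec(void *ctx)
{
	struct event *e;

	/* Reserve space in the ring buffer; NULL means it is currently full. */
	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;	/* tgid, the userspace PID */
	bpf_ringbuf_submit(e, 0);	/* publish the record and wake consumers */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The wake_up_all(&rb->waitq) match above is the kernel half of that notification: submitting a record eventually wakes anyone blocked in epoll/ring_buffer__poll() on the map.
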
/linux-6.12.1/Documentation/translations/zh_CN/core-api/
  rbtree.rst
    271  node = rb_entry(root->rb_node, struct interval_tree_node, rb);
    274  if (node->rb.rb_left) {
    276  rb_entry(node->rb.rb_left,
    277  struct interval_tree_node, rb);
    294  if (node->rb.rb_right) {
    295  node = rb_entry(node->rb.rb_right,
    296  struct interval_tree_node, rb);
    311  if (node->rb.rb_left) {
    312  subtree_last = rb_entry(node->rb.rb_left,
    313  struct interval_tree_node, rb)->__subtree_last;
         [all …]

/linux-6.12.1/tools/testing/selftests/powerpc/stringloops/
  memcmp_64.S
     15  #define rB r10   macro
    121  lbz rB,0(r4)
    122  subf. rC,rB,rA
    127  lbz rB,1(r4)
    128  subf. rC,rB,rA
    133  lbz rB,2(r4)
    134  subf. rC,rB,rA
    139  lbz rB,3(r4)
    140  subf. rC,rB,rA
    173  LD rB,0,r4
         [all …]

/linux-6.12.1/arch/powerpc/lib/
  memcmp_64.S
     15  #define rB r10   macro
    121  lbz rB,0(r4)
    122  subf. rC,rB,rA
    127  lbz rB,1(r4)
    128  subf. rC,rB,rA
    133  lbz rB,2(r4)
    134  subf. rC,rB,rA
    139  lbz rB,3(r4)
    140  subf. rC,rB,rA
    173  LD rB,0,r4
         [all …]

/linux-6.12.1/drivers/misc/mchp_pci1xxxx/
  mchp_pci1xxxx_otpe2p.c
     99  void __iomem *rb = priv->reg_base;   in is_eeprom_responsive() local
    104  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));   in is_eeprom_responsive()
    106  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));   in is_eeprom_responsive()
    111  true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));   in is_eeprom_responsive()
    124  void __iomem *rb = priv->reg_base;   in pci1xxxx_eeprom_read() local
    141  writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb +   in pci1xxxx_eeprom_read()
    148  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));   in pci1xxxx_eeprom_read()
    154  buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));   in pci1xxxx_eeprom_read()
    165  void __iomem *rb = priv->reg_base;   in pci1xxxx_eeprom_write() local
    182  writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));   in pci1xxxx_eeprom_write()
         [all …]

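The truncated is_eeprom_responsive() lines above are the tail of a register poll on EEPROM_CMD_REG. Whether the driver uses exactly this form is not visible in the truncated matches; as a sketch of the generic MMIO polling idiom with readl_poll_timeout() from <linux/iopoll.h>, reusing the register and bit macros from the snippet (the 100 µs poll interval and 100 ms timeout are illustrative, not the driver's values):

#include <linux/iopoll.h>
#include <linux/io.h>

/* Poll EEPROM_CMD_REG until the busy bit clears: re-read every 100 us,
 * give up after 100 ms. 'cmd' receives the last value read. */
static int wait_eeprom_idle(void __iomem *rb)
{
	u32 cmd;

	return readl_poll_timeout(rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG), cmd,
				  !(cmd & EEPROM_CMD_EPC_BUSY_BIT),
				  100, 100000);
}
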
/linux-6.12.1/arch/arm64/crypto/
  sm3-neon-core.S
     42  #define rb w4   macro
    356  ldp ra, rb, [RSTATE, #0]
    401  R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
    402  R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
    403  R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
    404  R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
    407  R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
    408  R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
    409  R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
    410  R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)
         [all …]

/linux-6.12.1/arch/powerpc/crypto/
  sha1-powerpc-asm.S
     28  #define RB(t) ((((t)+3)%6)+7)   macro
     40  andc r0,RD(t),RB(t); \
     41  and r6,RB(t),RC(t); \
     48  rotlwi RB(t),RB(t),30; \
     52  and r6,RB(t),RC(t); \
     53  andc r0,RD(t),RB(t); \
     55  rotlwi RB(t),RB(t),30; \
     67  xor r6,RB(t),RC(t); \
     69  rotlwi RB(t),RB(t),30; \
     77  xor r6,RB(t),RC(t); \
         [all …]

/linux-6.12.1/mm/
  interval_tree.c
     23  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
     38  if (!prev->shared.rb.rb_right) {   in vma_interval_tree_insert_after()
     40  link = &prev->shared.rb.rb_right;   in vma_interval_tree_insert_after()
     42  parent = rb_entry(prev->shared.rb.rb_right,   in vma_interval_tree_insert_after()
     43  struct vm_area_struct, shared.rb);   in vma_interval_tree_insert_after()
     46  while (parent->shared.rb.rb_left) {   in vma_interval_tree_insert_after()
     47  parent = rb_entry(parent->shared.rb.rb_left,   in vma_interval_tree_insert_after()
     48  struct vm_area_struct, shared.rb);   in vma_interval_tree_insert_after()
     52  link = &parent->shared.rb.rb_left;   in vma_interval_tree_insert_after()
     56  rb_link_node(&node->shared.rb, &parent->shared.rb, link);   in vma_interval_tree_insert_after()
         [all …]

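The INTERVAL_TREE_DEFINE() instantiation above generates a VMA-specific interval tree over the rbtree node embedded at shared.rb. For the ready-made generic variant in <linux/interval_tree.h>, a minimal sketch of inserting one closed interval and walking every node that overlaps a query range (the interval values are illustrative; the calls are the real generic API):

#include <linux/interval_tree.h>

/* Insert [100, 199] into the tree, then visit each node overlapping [150, 250]. */
static void interval_tree_demo(struct rb_root_cached *root,
			       struct interval_tree_node *node)
{
	struct interval_tree_node *hit;

	node->start = 100;
	node->last  = 199;		/* 'last' is inclusive */
	interval_tree_insert(node, root);

	for (hit = interval_tree_iter_first(root, 150, 250);
	     hit;
	     hit = interval_tree_iter_next(hit, 150, 250)) {
		/* ... each 'hit' overlaps [150, 250]; use hit->start / hit->last ... */
	}
}
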
/linux-6.12.1/arch/powerpc/xmon/
  spu-insns.h
     15  RRR | op | RC | RB | RA | RT |      RI7 | op | I7 | RA | RT |
     33  RR | op | RB | RA | RT |      LBT | op |RO| I16 | RO |
     53  ASM_RRR mnemonic RC, RA, RB, RT      ASM_RI4 mnemonic RT, RA, I4
     71  ASM_RAB mnemonic RA, RB
     73  ASM_RR mnemonic RT, RA, RB
     85  The first(most significant) digit is always 0. Then it is followd by RC, RB, RA and RT digits.
     90  …ction has 00113 as the DEPENDENCY field. This means RC is not used in this operation, RB and RA are
    122  /* 0[RC][RB][RA][RT] */
    181  APUOP(M_CBX, RR, 0x1d4, "cbx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz…
    182  APUOP(M_CHX, RR, 0x1d5, "chx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz…
         [all …]

/linux-6.12.1/drivers/target/iscsi/
  iscsi_target_configfs.c
     44  ssize_t rb;   in lio_target_np_driver_show() local
     48  rb = sysfs_emit(page, "1\n");
     50  rb = sysfs_emit(page, "0\n");
     52  return rb;
    474  ssize_t rb; \
    479  rb = snprintf(page, PAGE_SIZE, \
    483  rb = snprintf(page, PAGE_SIZE, "%u\n", \
    488  return rb; \
    530  ssize_t rb = 0;   in lio_target_nacl_info_show() local
    536  rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"   in lio_target_nacl_info_show()
         [all …]

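The rb accumulator in lio_target_nacl_info_show() above is the usual pattern for multi-line configfs show callbacks: each sysfs_emit_at() appends at offset rb and returns the number of bytes written, so rb ends up as the total length. A minimal sketch of that pattern; the attribute name and printed values are illustrative, sysfs_emit()/sysfs_emit_at() are the real helpers:

#include <linux/sysfs.h>
#include <linux/configfs.h>

/* Build a multi-line attribute page and return its length. */
static ssize_t example_info_show(struct config_item *item, char *page)
{
	ssize_t rb = 0;

	rb += sysfs_emit_at(page, rb, "sessions: %d\n", 0);
	rb += sysfs_emit_at(page, rb, "state: idle\n");

	return rb;
}

Compared with the raw snprintf(page, PAGE_SIZE, ...) matches above, sysfs_emit()/sysfs_emit_at() enforce the one-page limit and warn on misuse, which is why newer show callbacks prefer them.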