Lines Matching refs:ctx_kern

 29  struct hid_bpf_ctx_kern ctx_kern = {  in dispatch_hid_bpf_device_event()  local
 48  memset(ctx_kern.data, 0, hdev->bpf.allocated_data);  in dispatch_hid_bpf_device_event()
 49  memcpy(ctx_kern.data, data, *size);  in dispatch_hid_bpf_device_event()
 54  ret = e->hid_device_event(&ctx_kern.ctx, type, source);  in dispatch_hid_bpf_device_event()
 61  ctx_kern.ctx.size = ret;  in dispatch_hid_bpf_device_event()
 66  ret = ctx_kern.ctx.size;  in dispatch_hid_bpf_device_event()
 68  if (ret > ctx_kern.ctx.allocated_size)  in dispatch_hid_bpf_device_event()
 74  return ctx_kern.data;  in dispatch_hid_bpf_device_event()
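
Taken together, these hits outline the device-event path: an on-stack hid_bpf_ctx_kern wraps the device's preallocated scratch buffer, the incoming report is staged into it, each attached program may rewrite the data and resize the report, and the final size is validated against allocated_size before the scratch buffer is handed back. A minimal sketch reconstructed from the fragments above; the signature, the hdev->bpf.prog_list walk, its locking, and the error paths are assumptions, not shown in the hits:

u8 *dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type,
                                  u8 *data, u32 *size, u64 source)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid            = hdev,
                        .allocated_size = hdev->bpf.allocated_data,
                        .size           = *size,
                },
                .data = hdev->bpf.device_data,  /* preallocated scratch buffer */
        };
        struct hid_bpf_ops *e;
        int ret;

        /* stage the incoming report in the scratch buffer (lines 48-49) */
        memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
        memcpy(ctx_kern.data, data, *size);

        /* let every attached program rewrite the report (lines 54, 61);
         * the list walk and its locking are elided here */
        list_for_each_entry(e, &hdev->bpf.prog_list, list) {
                if (!e->hid_device_event)
                        continue;
                ret = e->hid_device_event(&ctx_kern.ctx, type, source);
                if (ret < 0)
                        return ERR_PTR(ret);
                if (ret)
                        ctx_kern.ctx.size = ret;  /* program resized the report */
        }

        /* a program may resize the report, but never past the buffer
         * it was handed (lines 66, 68) */
        ret = ctx_kern.ctx.size;
        if (ret > ctx_kern.ctx.allocated_size)
                return ERR_PTR(-EINVAL);
        *size = ret;

        return ctx_kern.data;  /* line 74 */
}

Reusing the preallocated hdev->bpf.device_data buffer, rather than allocating per event, plausibly keeps the hot input path allocation-free; contrast this with the descriptor-fixup path below, which allocates.
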
 84  struct hid_bpf_ctx_kern ctx_kern = {  in dispatch_hid_bpf_raw_requests()  local
105  ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);  in dispatch_hid_bpf_raw_requests()

121  struct hid_bpf_ctx_kern ctx_kern = {  in dispatch_hid_bpf_output_report()  local
139  ret = e->hid_hw_output_report(&ctx_kern.ctx, source);  in dispatch_hid_bpf_output_report()
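
The two request-path dispatchers (lines 84/105 and 121/139) share one shape: build the same on-stack wrapper around the caller's buffer, then hand &ctx_kern.ctx to a single ops callback. A sketch of the common pattern using the output-report variant; the initializer fields, the list walk, and the first-handler-wins policy are assumptions by analogy with the event path:

int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf,
                                   u32 size, u64 source)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid            = hdev,
                        .allocated_size = size,
                        .size           = size,
                },
                .data = buf,  /* caller's buffer; no staging copy in the hits */
        };
        struct hid_bpf_ops *e;
        int ret;

        /* walk the attached programs (locking elided); the first one to
         * return nonzero has handled, or failed, the report (line 139) */
        list_for_each_entry(e, &hdev->bpf.prog_list, list) {
                if (!e->hid_hw_output_report)
                        continue;
                ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
                if (ret)
                        return ret;
        }

        return 0;
}
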
154  struct hid_bpf_ctx_kern ctx_kern = {  in call_hid_bpf_rdesc_fixup()  local
165  ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);  in call_hid_bpf_rdesc_fixup()
166  if (!ctx_kern.data)  in call_hid_bpf_rdesc_fixup()
169  memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));  in call_hid_bpf_rdesc_fixup()
171  ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);  in call_hid_bpf_rdesc_fixup()
176  if (ret > ctx_kern.ctx.allocated_size)  in call_hid_bpf_rdesc_fixup()
182  return krealloc(ctx_kern.data, *size, GFP_KERNEL);  in call_hid_bpf_rdesc_fixup()
185  kfree(ctx_kern.data);  in call_hid_bpf_rdesc_fixup()
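
The descriptor-fixup path manages its buffer differently: the scratch buffer is heap-allocated at HID_MAX_DESCRIPTOR_SIZE, the descriptor is copied in with an explicit bound, and on success the buffer is shrunk with krealloc() to the final size, while every failure funnels to kfree(). A sketch assembled from the hits; the control flow connecting them (label name, early-out when no rdesc_ops are attached) is assumed:

u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc,
                             unsigned int *size)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid            = hdev,
                        .size           = *size,
                        .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
                },
        };
        int ret;

        if (!hdev->bpf.rdesc_ops)  /* assumed early-out */
                goto ignore_bpf;

        /* heap scratch buffer, unlike the preallocated event path (165-166) */
        ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
        if (!ctx_kern.data)
                goto ignore_bpf;

        /* bounded copy: never trust *size past the buffer (line 169) */
        memcpy(ctx_kern.data, rdesc,
               min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

        ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);  /* line 171 */
        if (ret < 0)
                goto ignore_bpf;
        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)  /* line 176 */
                        goto ignore_bpf;
                *size = ret;
        }

        /* shrink the scratch buffer to the final size (line 182) */
        return krealloc(ctx_kern.data, *size, GFP_KERNEL);

ignore_bpf:
        kfree(ctx_kern.data);  /* line 185; kfree(NULL) is a no-op */
        return NULL;
}
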
284  struct hid_bpf_ctx_kern *ctx_kern;  in hid_bpf_get_data()  local
289  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in hid_bpf_get_data()
294  return ctx_kern->data + offset;  in hid_bpf_get_data()
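
Here the split between the UAPI-visible struct hid_bpf_ctx and the kernel-private wrapper pays off: a BPF program only ever holds a pointer to the embedded ctx, and container_of() recovers the wrapper, so the backing data pointer stays out of the program's reach except through this accessor. A sketch with the signature and bounds check assumed (the hits show only the recovery and the returned pointer):

__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset,
                       const size_t rdwr_buf_size)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        if (!ctx)
                return NULL;

        /* ctx is embedded in ctx_kern, so this recovers the private
         * wrapper from the pointer the program holds (line 289) */
        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        /* assumed bounds check: the requested window must fit the buffer */
        if (rdwr_buf_size + offset > ctx->allocated_size)
                return NULL;

        return ctx_kern->data + offset;  /* line 294 */
}
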
308  struct hid_bpf_ctx_kern *ctx_kern = NULL;  in hid_bpf_allocate_context()  local
314  ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);  in hid_bpf_allocate_context()
315  if (!ctx_kern) {  in hid_bpf_allocate_context()
320  ctx_kern->ctx.hid = hdev;  in hid_bpf_allocate_context()
322  return &ctx_kern->ctx;  in hid_bpf_allocate_context()
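
hid_bpf_allocate_context() is the heap-allocated counterpart of the on-stack wrappers above, and again only the embedded public struct escapes. A sketch in which the device lookup is an assumption (device_match_id is presumed to be a file-local helper), since the hits show only the allocation and the assignments:

struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id)
{
        struct hid_bpf_ctx_kern *ctx_kern = NULL;
        struct hid_device *hdev;
        struct device *dev;

        /* assumed lookup: resolve the id to a device, taking a reference */
        dev = bus_find_device(&hid_bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return NULL;
        hdev = to_hid_device(dev);

        ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);  /* line 314 */
        if (!ctx_kern) {                                    /* line 315 */
                put_device(dev);  /* assumed: undo the lookup's reference */
                return NULL;
        }

        ctx_kern->ctx.hid = hdev;                           /* line 320 */

        /* only the public part escapes the allocator (line 322) */
        return &ctx_kern->ctx;
}
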
334  struct hid_bpf_ctx_kern *ctx_kern;  in hid_bpf_release_context()  local
337  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in hid_bpf_release_context()
338  hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */  in hid_bpf_release_context()
340  kfree(ctx_kern);  in hid_bpf_release_context()
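
The release path is the mirror image: container_of() recovers the wrapper, the const qualifier on ctx->hid is deliberately cast away (the /* ignore const */ comment is in the hit itself) so the device reference taken at allocation time can be dropped, and the wrapper is freed. A sketch with the put_device() call assumed:

void hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        struct hid_device *hid;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  /* 337 */
        hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const (338) */

        kfree(ctx_kern);                                             /* 340 */

        /* assumed: drop the reference taken by hid_bpf_allocate_context() */
        put_device(&hid->dev);
}
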
401  struct hid_bpf_ctx_kern *ctx_kern;  in hid_bpf_hw_request()  local
407  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in hid_bpf_hw_request()
409  if (ctx_kern->from_bpf)  in hid_bpf_hw_request()

463  struct hid_bpf_ctx_kern *ctx_kern;  in hid_bpf_hw_output_report()  local
469  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in hid_bpf_hw_output_report()
470  if (ctx_kern->from_bpf)  in hid_bpf_hw_output_report()

494  struct hid_bpf_ctx_kern *ctx_kern;  in __hid_bpf_input_report()  local
497  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in __hid_bpf_input_report()
498  if (ctx_kern->from_bpf)  in __hid_bpf_input_report()
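
hid_bpf_hw_request(), hid_bpf_hw_output_report() and __hid_bpf_input_report() (lines 401-409, 463-470, 494-498) all open with the same guard: recover the wrapper, then refuse to service a call that itself originated from a BPF program, which would otherwise re-enter the dispatch paths above. A sketch of that shared prologue as a hypothetical helper; the -EDEADLOCK return value is an assumption based on common kernel convention for re-entry guards:

/* hypothetical helper naming the shared prologue of the three kfuncs */
static int hid_bpf_reentry_guard(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        if (ctx_kern->from_bpf)
                return -EDEADLOCK;  /* already inside a BPF-initiated call */

        return 0;
}
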
525  struct hid_bpf_ctx_kern *ctx_kern;  in hid_bpf_try_input_report()  local
528  ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);  in hid_bpf_try_input_report()
529  from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;  in hid_bpf_try_input_report()
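
hid_bpf_try_input_report() takes a different approach from the from_bpf flag: it infers its caller by pointer identity. The device-event path above hands programs the preallocated hdev->bpf.device_data buffer, so if ctx_kern->data aliases that buffer, the call must have come from inside a hid_device_event hook. A sketch of just that test, wrapped in a hypothetical predicate:

/* hypothetical predicate isolating the check at line 529 */
static bool called_from_event_hook(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        /* the event hook hands programs the device's preallocated buffer,
         * so pointer equality identifies that caller */
        return ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;
}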