1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! This module provides a wrapper for the C `struct request` type.
4 //!
5 //! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
6 
7 use crate::{
8     bindings,
9     block::mq::Operations,
10     error::Result,
11     types::{ARef, AlwaysRefCounted, Opaque},
12 };
13 use core::{
14     marker::PhantomData,
15     ptr::{addr_of_mut, NonNull},
16     sync::atomic::{AtomicU64, Ordering},
17 };
18 
/// A wrapper around a blk-mq `struct request`. This represents an IO request.
///
/// # Implementation details
///
/// There are four states for a request that the Rust bindings care about:
///
/// A) Request is owned by block layer (refcount 0)
/// B) Request is owned by driver but with zero `ARef`s in existence
///    (refcount 1)
/// C) Request is owned by driver with exactly one `ARef` in existence
///    (refcount 2)
/// D) Request is owned by driver with more than one `ARef` in existence
///    (refcount > 2)
///
/// We need to track A and B to ensure we fail tag to request conversions for
/// requests that are not owned by the driver.
///
/// We need to track C and D to ensure that it is safe to end the request and hand
/// back ownership to the block layer.
///
/// The states are tracked through the private `refcount` field of
/// `RequestDataWrapper`. This structure lives in the private data area of the C
/// `struct request`.
///
/// # Invariants
///
/// * `self.0` is a valid `struct request` created by the C portion of the kernel.
/// * The private data area associated with this request must be an initialized
///   and valid `RequestDataWrapper<T>`.
/// * `self` is reference counted by atomic modification of
///   `self.wrapper_ref().refcount()`.
#[repr(transparent)]
pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
54 
impl<T: Operations> Request<T> {
    /// Create an `ARef<Request>` from a `struct request` pointer.
    ///
    /// # Safety
    ///
    /// * The caller must own a refcount on `ptr` that is transferred to the
    ///   returned `ARef`.
    /// * The type invariants for `Request` must hold for the pointee of `ptr`.
    pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
        // INVARIANT: By the safety requirements of this function, invariants are upheld.
        // SAFETY: By the safety requirement of this function, we own a
        // reference count that we can pass to `ARef`. The pointer cast is sound
        // because `Request` is `#[repr(transparent)]` over
        // `Opaque<bindings::request>`.
        unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
    }

    /// Notify the block layer that a request is going to be processed now.
    ///
    /// The block layer uses this hook to do proper initializations such as
    /// starting the timeout timer. It is a requirement that block device
    /// drivers call this function when starting to process a request.
    ///
    /// # Safety
    ///
    /// The caller must have exclusive ownership of `self`, that is
    /// `self.wrapper_ref().refcount() == 2`.
    pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
        // SAFETY: By type invariant, `self.0` is a valid `struct request` and
        // we have exclusive access.
        unsafe { bindings::blk_mq_start_request(this.0.get()) };
    }

    /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
    /// This fails if `this` is not the only `ARef` pointing to the underlying
    /// `Request`.
    ///
    /// If the operation is successful, `Ok` is returned with a pointer to the
    /// C `struct request`. If the operation fails, `this` is returned in the
    /// `Err` variant.
    fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
        // We can race with `TagSet::tag_to_rq`. A refcount of 2 means `this`
        // is the only `ARef` in existence (state C in the type documentation),
        // so exchanging 2 for 0 atomically claims exclusive ownership.
        if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
            2,
            0,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            return Err(this);
        }

        let request_ptr = this.0.get();
        // The refcount was already set to 0 above, so `this` must not run its
        // destructor (which would decrement again); forget it instead.
        core::mem::forget(this);

        Ok(request_ptr)
    }

    /// Notify the block layer that the request has been completed without errors.
    ///
    /// This function will return `Err` if `this` is not the only `ARef`
    /// referencing the request.
    pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
        let request_ptr = Self::try_set_end(this)?;

        // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
        // success of the call to `try_set_end` guarantees that there are no
        // `ARef`s pointing to this request. Therefore it is safe to hand it
        // back to the block layer.
        unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };

        Ok(())
    }

    /// Return a pointer to the `RequestDataWrapper` stored in the private area
    /// of the request structure.
    ///
    /// # Safety
    ///
    /// - `this` must point to a valid allocation of size at least size of
    ///   `Self` plus size of `RequestDataWrapper`.
    pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
        let request_ptr = this.cast::<bindings::request>();
        // SAFETY: By safety requirements for this function, `this` is a
        // valid allocation.
        let wrapper_ptr =
            unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
        // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
        // and is not null.
        unsafe { NonNull::new_unchecked(wrapper_ptr) }
    }

    /// Return a reference to the `RequestDataWrapper` stored in the private
    /// area of the request structure.
    pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
        // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
        // the private data associated with this request is initialized and
        // valid. The existence of `&self` guarantees that the private data is
        // valid as a shared reference.
        unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
    }
}
154 
/// A wrapper around data stored in the private area of the C `struct request`.
pub(crate) struct RequestDataWrapper {
    /// The Rust request refcount has the following states:
    ///
    /// - 0: The request is owned by C block layer.
    /// - 1: The request is owned by Rust abstractions but there are no ARef references to it.
    /// - 2+: There are `ARef` references to the request.
    ///
    /// All modifications of this field use relaxed atomic operations; see the
    /// `AlwaysRefCounted` implementation for `Request` below.
    refcount: AtomicU64,
}
164 
impl RequestDataWrapper {
    /// Return a reference to the refcount of the request that is embedding
    /// `self`.
    pub(crate) fn refcount(&self) -> &AtomicU64 {
        &self.refcount
    }

    /// Return a pointer to the refcount of the request that is embedding the
    /// pointee of `this`.
    ///
    /// # Safety
    ///
    /// - `this` must point to a live allocation of at least the size of `Self`.
    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
        // SAFETY: Because of the safety requirements of this function, the
        // field projection is safe. `addr_of_mut!` avoids creating an
        // intermediate reference to the possibly-uninitialized pointee.
        unsafe { addr_of_mut!((*this).refcount) }
    }
}
184 
// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
// self` methods and `&self` methods that mutate `self` are internally
// synchronized.
unsafe impl<T: Operations> Send for Request<T> {}
189 
// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
// mutate `self` are internally synchronized.
unsafe impl<T: Operations> Sync for Request<T> {}
193 
/// Store the result of `op(target.load())` in `target`, returning the new value
/// of `target`.
///
/// The update is performed as an atomic read-modify-write with relaxed
/// ordering; under contention `op` may be re-evaluated with a fresh value
/// until the store succeeds.
fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
    let mut new = 0;
    // Capturing the computed value in `new` means the returned value is
    // exactly the value that was stored, and `op` is not re-evaluated after
    // the update. The closure always returns `Some`, so `fetch_update` cannot
    // fail and its result can be ignored; this removes the need for the
    // previous `unsafe { unwrap_unchecked() }`.
    let _ = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
        new = op(x);
        Some(new)
    });
    new
}
205 
/// Store the result of `op(target.load())` in `target` if `target.load() !=
/// pred`, returning `true` if the target was updated.
///
/// The check and the update happen as one atomic read-modify-write with
/// relaxed ordering.
fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
    // Returning `None` from the closure makes `fetch_update` bail out with
    // `Err` and leave `target` untouched; `bool::then` expresses the
    // "only when different from `pred`" guard directly.
    let guarded = |current: u64| (current != pred).then(|| op(current));
    target
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, guarded)
        .is_ok()
}
219 
// SAFETY: All instances of `Request<T>` are reference counted. This
// implementation of `AlwaysRefCounted` ensures that increments to the ref count
// keep the object alive in memory at least until a matching reference count
// decrement is executed.
unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
    fn inc_ref(&self) {
        let refcount = &self.wrapper_ref().refcount();

        // Refuse to increment from 0: that state means the request is owned
        // by the C block layer and must not be resurrected from Rust.
        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);

        #[cfg(CONFIG_DEBUG_MISC)]
        if !updated {
            panic!("Request refcount zero on clone")
        }
    }

    unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
        // Work through raw pointers rather than `wrapper_ref`, since creating
        // a `&self` is not justified here by the `dec_ref` safety contract.
        // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
        // for read.
        let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
        // SAFETY: The type invariant of `Request` guarantees that the private
        // data area is initialized and valid.
        let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };

        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);

        // Dropping the last `ARef` should leave refcount 1 (driver-owned, no
        // `ARef`s); reaching 0 here would mean an unbalanced decrement.
        #[cfg(CONFIG_DEBUG_MISC)]
        if new_refcount == 0 {
            panic!("Request reached refcount zero in Rust abstractions");
        }
    }
}
254