Lines Matching full:reorder in kernel/padata.c

243  *   serialization, if present in one of the percpu reorder queues.
246 * the cpu's reorder queue.
252 struct padata_list *reorder; in padata_find_next() local
255 reorder = per_cpu_ptr(pd->reorder_list, cpu); in padata_find_next()
257 spin_lock(&reorder->lock); in padata_find_next()
258 if (list_empty(&reorder->list)) { in padata_find_next()
259 spin_unlock(&reorder->lock); in padata_find_next()
263 padata = list_entry(reorder->list.next, struct padata_priv, list); in padata_find_next()
270 spin_unlock(&reorder->lock); in padata_find_next()
280 spin_unlock(&reorder->lock); in padata_find_next()
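
The matches above all fall inside padata_find_next(), which peeks at (or, with remove_object set, dequeues) the head of the current CPU's reorder queue. A condensed sketch of the function as it reads in recent mainline kernel/padata.c follows; field names match the declarations in the listing, but details such as the cpumask helper's arguments vary between kernel versions:

static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/* Not the next sequence number in line: another job hashed to
	 * this CPU finished out of order, so serialization must wait. */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		/* Advance to the next CPU in the parallel cpumask; note
		 * the cpumask_next_wrap() signature differs across
		 * kernel versions. */
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
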
290 struct padata_list *reorder; in padata_reorder() local
294 * the reorder queue at a time. Calculating in which percpu reorder in padata_reorder()
297 * the objects arrive at the reorder queues. So a cpu could wait to in padata_reorder()
311 * cpu's reorder queue, nothing to do for now. in padata_reorder()
330 * the reorder queues in the meantime. in padata_reorder()
332 * Ensure reorder queue is read after pd->lock is dropped so we see in padata_reorder()
338 reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); in padata_reorder()
339 if (!list_empty(&reorder->list) && padata_find_next(pd, false)) in padata_reorder()
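
These matches come from padata_reorder(), which implements the trylock scheme described in the comment at 294-297: only one cpu drains the reorder queues at a time, and the smp_mb() plus recheck at 330-339 catches objects that arrived after the lock was dropped. Roughly, per recent mainline (a sketch, not a drop-in):

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	struct padata_serial_queue *squeue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cb_cpu;

	/* Losing the trylock is fine: the holder serializes everything
	 * enqueued while it owns pd->lock. */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/* NULL: the next object is still being parallel processed
		 * on another cpu and has not reached its reorder queue. */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/* Pairs with smp_mb() in padata_do_serial(). */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}
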
399 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); in padata_do_serial() local
403 spin_lock(&reorder->lock); in padata_do_serial()
405 list_for_each_prev(pos, &reorder->list) { in padata_do_serial()
412 spin_unlock(&reorder->lock); in padata_do_serial()
415 * Ensure the addition to the reorder list is ordered correctly in padata_do_serial()
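
padata_do_serial() is the producer side: it sorts the finished object into the reorder list of the CPU its sequence number hashed to, then issues the barrier the comment at 415 describes before calling padata_reorder(). Approximately, with the same version caveats as above:

void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Walk from the tail, where a new object most likely belongs,
	 * keeping the list sorted by ascending seq_nr. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		if (cur->seq_nr < padata->seq_nr)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/* Make the insertion visible before padata_reorder() tries
	 * pd->lock; pairs with smp_mb() there. */
	smp_mb();

	padata_reorder(pd);
}
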
574 /* Initialize per-CPU reorder lists */
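
The last match is from setup: each reorder queue gets an empty list and a lock before the parallel_data goes live. A minimal sketch, assuming the __padata_list_init()/padata_init_reorder_list() helpers of current trees:

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}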