Lines Matching +full:1 +full:q
50 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
57 queue_list[i] = q; in queue_list_add()
58 q->queue = i; in queue_list_add()
63 return -1; in queue_list_add()
68 struct snd_seq_queue *q; in queue_list_remove() local
71 q = queue_list[id]; in queue_list_remove()
72 if (q) { in queue_list_remove()
73 guard(spinlock)(&q->owner_lock); in queue_list_remove()
74 if (q->owner == client) { in queue_list_remove()
76 q->klocked = 1; in queue_list_remove()
79 return q; in queue_list_remove()
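
The two fragments above are the queue-table helpers: queue_list_add() takes the first free slot, stores the pointer and records the slot index in q->queue, returning -1 when the table is full, while queue_list_remove() hands a queue back only if the requesting client owns it, setting q->klocked under owner_lock so no other client can grab a queue that is about to be torn down. A minimal userspace sketch of the same pattern, with assumed names (MAX_QUEUES, struct queue) and pthread mutexes standing in for the kernel spinlocks:

    /* Sketch only: owner-checked slot table, not the kernel implementation. */
    #include <pthread.h>
    #include <stddef.h>

    #define MAX_QUEUES 32               /* assumed table size */

    struct queue {
        int queue;                      /* slot index, -1 while unlisted */
        int owner;                      /* owning client id */
        int klocked;                    /* set while the queue is being torn down */
        pthread_mutex_t owner_lock;
    };

    static struct queue *queue_table[MAX_QUEUES];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static int table_add(struct queue *q)
    {
        int i, slot = -1;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_QUEUES; i++) {
            if (!queue_table[i]) {
                queue_table[i] = q;
                q->queue = i;
                slot = i;
                break;
            }
        }
        pthread_mutex_unlock(&table_lock);
        return slot;                    /* -1: table full */
    }

    static struct queue *table_remove(int id, int client)
    {
        struct queue *q, *removed = NULL;

        pthread_mutex_lock(&table_lock);
        q = queue_table[id];
        if (q) {
            pthread_mutex_lock(&q->owner_lock);
            if (q->owner == client) {
                queue_table[id] = NULL;
                q->klocked = 1;         /* block other clients during teardown */
                removed = q;
            }
            pthread_mutex_unlock(&q->owner_lock);
        }
        pthread_mutex_unlock(&table_lock);
        return removed;                 /* NULL: no such queue, or not the owner */
    }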
90 struct snd_seq_queue *q; in queue_new() local
92 q = kzalloc(sizeof(*q), GFP_KERNEL); in queue_new()
93 if (!q) in queue_new()
96 spin_lock_init(&q->owner_lock); in queue_new()
97 spin_lock_init(&q->check_lock); in queue_new()
98 mutex_init(&q->timer_mutex); in queue_new()
99 snd_use_lock_init(&q->use_lock); in queue_new()
100 q->queue = -1; in queue_new()
102 q->tickq = snd_seq_prioq_new(); in queue_new()
103 q->timeq = snd_seq_prioq_new(); in queue_new()
104 q->timer = snd_seq_timer_new(); in queue_new()
105 if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) { in queue_new()
106 snd_seq_prioq_delete(&q->tickq); in queue_new()
107 snd_seq_prioq_delete(&q->timeq); in queue_new()
108 snd_seq_timer_delete(&q->timer); in queue_new()
109 kfree(q); in queue_new()
113 q->owner = owner; in queue_new()
114 q->locked = locked; in queue_new()
115 q->klocked = 0; in queue_new()
117 return q; in queue_new()
121 static void queue_delete(struct snd_seq_queue *q) in queue_delete() argument
124 mutex_lock(&q->timer_mutex); in queue_delete()
125 snd_seq_timer_stop(q->timer); in queue_delete()
126 snd_seq_timer_close(q); in queue_delete()
127 mutex_unlock(&q->timer_mutex); in queue_delete()
129 snd_use_lock_sync(&q->use_lock); in queue_delete()
131 snd_seq_prioq_delete(&q->tickq); in queue_delete()
132 snd_seq_prioq_delete(&q->timeq); in queue_delete()
133 snd_seq_timer_delete(&q->timer); in queue_delete()
135 kfree(q); in queue_delete()
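
queue_new() zero-allocates the structure, initializes its locks, and then creates the two priority queues plus the timer; if any of the three fails, the ones that did succeed are deleted again (the delete helpers tolerate a NULL object) before the queue itself is freed. queue_delete() runs the reverse order: stop and close the timer under timer_mutex, wait for all outstanding references with snd_use_lock_sync(), and only then release the sub-objects and the structure. A compressed userspace sketch of that all-or-nothing construction, using calloc/free and made-up sub-object types:

    /* Sketch only: build three sub-objects or none (assumed types and names). */
    #include <stdlib.h>

    struct prioq  { int nevents; };
    struct qtimer { int running; };

    struct queue {
        struct prioq  *tickq;
        struct prioq  *timeq;
        struct qtimer *timer;
    };

    /* NULL-safe deleters that also clear the slot, like the snd_seq_*_delete() helpers */
    static void prioq_delete(struct prioq **p)   { free(*p); *p = NULL; }
    static void qtimer_delete(struct qtimer **t) { free(*t); *t = NULL; }

    static struct queue *queue_create(void)
    {
        struct queue *q = calloc(1, sizeof(*q));

        if (!q)
            return NULL;
        q->tickq = calloc(1, sizeof(*q->tickq));
        q->timeq = calloc(1, sizeof(*q->timeq));
        q->timer = calloc(1, sizeof(*q->timer));
        if (!q->tickq || !q->timeq || !q->timer) {
            /* roll back whichever allocations succeeded */
            prioq_delete(&q->tickq);
            prioq_delete(&q->timeq);
            qtimer_delete(&q->timer);
            free(q);
            return NULL;
        }
        return q;
    }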
157 * The new queue's use_lock is set to 1. It is the caller's responsibility to
158 * call snd_use_lock_free(&q->use_lock).
162 struct snd_seq_queue *q; in snd_seq_queue_alloc() local
164 q = queue_new(client, locked); in snd_seq_queue_alloc()
165 if (q == NULL) in snd_seq_queue_alloc()
167 q->info_flags = info_flags; in snd_seq_queue_alloc()
168 queue_use(q, client, 1); in snd_seq_queue_alloc()
169 snd_use_lock_use(&q->use_lock); in snd_seq_queue_alloc()
170 if (queue_list_add(q) < 0) { in snd_seq_queue_alloc()
171 snd_use_lock_free(&q->use_lock); in snd_seq_queue_alloc()
172 queue_delete(q); in snd_seq_queue_alloc()
175 return q; in snd_seq_queue_alloc()
181 struct snd_seq_queue *q; in snd_seq_queue_delete() local
185 q = queue_list_remove(queueid, client); in snd_seq_queue_delete()
186 if (q == NULL) in snd_seq_queue_delete()
188 queue_delete(q); in snd_seq_queue_delete()
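
snd_seq_queue_alloc() returns the queue with use_lock already taken, as the comment fragment above spells out, so the caller owes a matching snd_use_lock_free(); when the table is full, the error path drops that reference first and only then calls queue_delete(), whose snd_use_lock_sync() waits for the count to drain. snd_seq_queue_delete() simply detaches the queue via queue_list_remove() and destroys it. A rough userspace sketch of such a use counter, with an atomic count and a polling wait standing in for the kernel's implementation:

    /* Sketch only: a "use lock" as an atomic counter plus a drain wait. */
    #include <stdatomic.h>
    #include <sched.h>

    struct use_lock { atomic_int count; };

    static void use_lock_init(struct use_lock *u) { atomic_init(&u->count, 0); }
    static void use_lock_use(struct use_lock *u)  { atomic_fetch_add(&u->count, 1); }
    static void use_lock_free(struct use_lock *u) { atomic_fetch_sub(&u->count, 1); }

    /* wait until every user has dropped its reference */
    static void use_lock_sync(struct use_lock *u)
    {
        while (atomic_load(&u->count) > 0)
            sched_yield();
    }

In the fragment above, queue_use(q, client, 1) registers the creating client as a user, snd_use_lock_use() takes the caller's reference, and the failed queue_list_add() path releases that reference before queue_delete(), so the destructor's sync is not left waiting on the caller itself.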
197 struct snd_seq_queue *q; in queueptr() local
202 q = queue_list[queueid]; in queueptr()
203 if (q) in queueptr()
204 snd_use_lock_use(&q->use_lock); in queueptr()
205 return q; in queueptr()
212 struct snd_seq_queue *q; in snd_seq_queue_find_name() local
215 q = queueptr(i); in snd_seq_queue_find_name()
216 if (q) { in snd_seq_queue_find_name()
217 if (strncmp(q->name, name, sizeof(q->name)) == 0) in snd_seq_queue_find_name()
218 return q; in snd_seq_queue_find_name()
219 queuefree(q); in snd_seq_queue_find_name()
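
queueptr() is the standard lookup: it fetches the slot and, if occupied, takes a use_lock reference before returning, so the queue cannot be freed while the caller works with it; queuefree() drops that reference again. snd_seq_queue_find_name() walks every slot with queueptr(), compares q->name, and either returns the still-referenced match or releases the reference and moves on. A userspace sketch of the lookup-with-reference pattern, with assumed field names and the counter idea from the previous sketch:

    /* Sketch only: lookups return with a reference held (assumed names). */
    #include <stdatomic.h>
    #include <string.h>
    #include <pthread.h>
    #include <stddef.h>

    #define MAX_QUEUES 32

    struct queue {
        char name[64];
        atomic_int use_count;
    };

    static struct queue *queue_table[MAX_QUEUES];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct queue *queue_get(int id)          /* like queueptr() */
    {
        struct queue *q;

        pthread_mutex_lock(&table_lock);
        q = queue_table[id];
        if (q)
            atomic_fetch_add(&q->use_count, 1);
        pthread_mutex_unlock(&table_lock);
        return q;
    }

    static void queue_put(struct queue *q)          /* like queuefree() */
    {
        atomic_fetch_sub(&q->use_count, 1);
    }

    static struct queue *queue_find_name(const char *name)
    {
        int i;

        for (i = 0; i < MAX_QUEUES; i++) {
            struct queue *q = queue_get(i);

            if (!q)
                continue;
            if (strncmp(q->name, name, sizeof(q->name)) == 0)
                return q;               /* caller still holds the reference */
            queue_put(q);
        }
        return NULL;
    }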
230 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) in snd_seq_check_queue() argument
237 if (q == NULL) in snd_seq_check_queue()
241 scoped_guard(spinlock_irqsave, &q->check_lock) { in snd_seq_check_queue()
242 if (q->check_blocked) { in snd_seq_check_queue()
243 q->check_again = 1; in snd_seq_check_queue()
246 q->check_blocked = 1; in snd_seq_check_queue()
251 cur_tick = snd_seq_timer_get_cur_tick(q->timer); in snd_seq_check_queue()
253 cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick); in snd_seq_check_queue()
262 cur_time = snd_seq_timer_get_cur_time(q->timer, false); in snd_seq_check_queue()
264 cell = snd_seq_prioq_cell_out(q->timeq, &cur_time); in snd_seq_check_queue()
274 scoped_guard(spinlock_irqsave, &q->check_lock) { in snd_seq_check_queue()
275 if (q->check_again) { in snd_seq_check_queue()
276 q->check_again = 0; in snd_seq_check_queue()
280 q->check_blocked = 0; in snd_seq_check_queue()
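
snd_seq_check_queue() is the dispatch loop that pops every cell whose timestamp is due from the tick queue and the real-time queue. The check_blocked/check_again pair under check_lock ensures only one context drains a queue at a time: a second caller that finds check_blocked set just records check_again and leaves, and the active drainer loops once more when it sees that flag before clearing check_blocked. A userspace sketch of that single-drainer, re-check-on-exit idiom, where drain_pending() stands in for the two prioq loops:

    /* Sketch only: let one thread drain, remember late arrivals (assumed names). */
    #include <pthread.h>
    #include <stdbool.h>

    struct checker {
        pthread_mutex_t check_lock;
        bool check_blocked;
        bool check_again;
    };

    static void drain_pending(void)
    {
        /* stand-in for popping due cells from the tick and time queues */
    }

    static void check_queue(struct checker *c)
    {
        pthread_mutex_lock(&c->check_lock);
        if (c->check_blocked) {
            c->check_again = true;              /* someone else is draining */
            pthread_mutex_unlock(&c->check_lock);
            return;
        }
        c->check_blocked = true;
        pthread_mutex_unlock(&c->check_lock);

        for (;;) {
            drain_pending();

            pthread_mutex_lock(&c->check_lock);
            if (c->check_again) {
                c->check_again = false;         /* new work arrived: go again */
                pthread_mutex_unlock(&c->check_lock);
                continue;
            }
            c->check_blocked = false;
            pthread_mutex_unlock(&c->check_lock);
            return;
        }
    }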
289 struct snd_seq_queue *q; in snd_seq_enqueue_event() local
294 q = queueptr(dest); in snd_seq_enqueue_event()
295 if (q == NULL) in snd_seq_enqueue_event()
301 cell->event.time.tick += q->timer->tick.cur_tick; in snd_seq_enqueue_event()
306 &q->timer->cur_time); in snd_seq_enqueue_event()
315 err = snd_seq_prioq_cell_in(q->tickq, cell); in snd_seq_enqueue_event()
320 err = snd_seq_prioq_cell_in(q->timeq, cell); in snd_seq_enqueue_event()
325 queuefree(q); /* unlock */ in snd_seq_enqueue_event()
330 snd_seq_check_queue(q, atomic, hop); in snd_seq_enqueue_event()
332 queuefree(q); /* unlock */ in snd_seq_enqueue_event()
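
snd_seq_enqueue_event() first fixes up the timestamp: a relative tick or time value is made absolute by adding the queue's current tick (q->timer->tick.cur_tick) or current time, and the cell is then inserted into the tick queue or the real-time queue according to its time mode. On an insertion error the queue reference is dropped and the error returned; otherwise the queue is drained once. A small sketch of the fix-up and queue selection, with made-up event and queue types:

    /* Sketch only: relative-to-absolute fix-up and tick/time queue selection. */
    enum time_mode { MODE_ABS_TICK, MODE_REL_TICK, MODE_ABS_REAL, MODE_REL_REAL };

    struct event {
        enum time_mode mode;
        unsigned long tick;             /* used in tick modes */
        double seconds;                 /* used in real-time modes */
    };

    struct sched_queue {
        unsigned long cur_tick;         /* like q->timer->tick.cur_tick */
        double cur_seconds;             /* like q->timer->cur_time */
        /* the two priority queues, tickq and timeq, would live here */
    };

    static int prioq_insert_tick(struct sched_queue *q, struct event *ev) { (void)q; (void)ev; return 0; }
    static int prioq_insert_time(struct sched_queue *q, struct event *ev) { (void)q; (void)ev; return 0; }

    static int enqueue_event(struct sched_queue *q, struct event *ev)
    {
        switch (ev->mode) {
        case MODE_REL_TICK:             /* make the timestamp absolute first */
            ev->tick += q->cur_tick;
            ev->mode = MODE_ABS_TICK;
            /* fall through */
        case MODE_ABS_TICK:
            return prioq_insert_tick(q, ev);
        case MODE_REL_REAL:
            ev->seconds += q->cur_seconds;
            ev->mode = MODE_ABS_REAL;
            /* fall through */
        case MODE_ABS_REAL:
            return prioq_insert_time(q, ev);
        }
        return -1;
    }

After a successful insert the real function calls snd_seq_check_queue() once, so cells that are already due get delivered right away.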
340 static inline int check_access(struct snd_seq_queue *q, int client) in check_access() argument
342 return (q->owner == client) || (!q->locked && !q->klocked); in check_access()
348 static int queue_access_lock(struct snd_seq_queue *q, int client) in queue_access_lock() argument
352 guard(spinlock_irqsave)(&q->owner_lock); in queue_access_lock()
353 access_ok = check_access(q, client); in queue_access_lock()
355 q->klocked = 1; in queue_access_lock()
360 static inline void queue_access_unlock(struct snd_seq_queue *q) in queue_access_unlock() argument
362 guard(spinlock_irqsave)(&q->owner_lock); in queue_access_unlock()
363 q->klocked = 0; in queue_access_unlock()
369 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_check_access() local
372 if (! q) in snd_seq_queue_check_access()
374 scoped_guard(spinlock_irqsave, &q->owner_lock) in snd_seq_queue_check_access()
375 access_ok = check_access(q, client); in snd_seq_queue_check_access()
376 queuefree(q); in snd_seq_queue_check_access()
387 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_set_owner() local
389 if (q == NULL) in snd_seq_queue_set_owner()
392 if (! queue_access_lock(q, client)) { in snd_seq_queue_set_owner()
393 queuefree(q); in snd_seq_queue_set_owner()
397 scoped_guard(spinlock_irqsave, &q->owner_lock) { in snd_seq_queue_set_owner()
398 q->locked = locked ? 1 : 0; in snd_seq_queue_set_owner()
399 q->owner = client; in snd_seq_queue_set_owner()
401 queue_access_unlock(q); in snd_seq_queue_set_owner()
402 queuefree(q); in snd_seq_queue_set_owner()
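
check_access() grants access when the client owns the queue or when the queue is neither user-locked (locked) nor kernel-locked (klocked); queue_access_lock() additionally sets klocked on success, so the caller holds the queue exclusively until queue_access_unlock() clears the flag. snd_seq_queue_set_owner() uses exactly that sequence: look up the queue, take the access lock, update owner and locked under owner_lock, then release both the access lock and the reference. A userspace sketch of the access-lock pair, with assumed field names:

    /* Sketch only: owner/lock based access control. */
    #include <pthread.h>
    #include <stdbool.h>

    struct queue {
        int owner;
        bool locked;                    /* locked by its owner against other users */
        bool klocked;                   /* temporarily locked by kernel-side code */
        pthread_mutex_t owner_lock;
    };

    static bool check_access(struct queue *q, int client)
    {
        return q->owner == client || (!q->locked && !q->klocked);
    }

    static bool queue_access_lock(struct queue *q, int client)
    {
        bool ok;

        pthread_mutex_lock(&q->owner_lock);
        ok = check_access(q, client);
        if (ok)
            q->klocked = true;          /* hold the queue until unlock */
        pthread_mutex_unlock(&q->owner_lock);
        return ok;
    }

    static void queue_access_unlock(struct queue *q)
    {
        pthread_mutex_lock(&q->owner_lock);
        q->klocked = false;
        pthread_mutex_unlock(&q->owner_lock);
    }

    static void queue_set_owner(struct queue *q, int client, bool locked)
    {
        if (!queue_access_lock(q, client))
            return;                     /* somebody else holds the queue */
        pthread_mutex_lock(&q->owner_lock);
        q->locked = locked;
        q->owner = client;
        pthread_mutex_unlock(&q->owner_lock);
        queue_access_unlock(q);
    }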
411 * q->use mutex should be down before calling this function to avoid
434 * q->use mutex should be down before calling this function
453 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_timer_set_tempo() local
456 if (q == NULL) in snd_seq_queue_timer_set_tempo()
458 if (! queue_access_lock(q, client)) { in snd_seq_queue_timer_set_tempo()
459 queuefree(q); in snd_seq_queue_timer_set_tempo()
463 result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq, in snd_seq_queue_timer_set_tempo()
466 result = snd_seq_timer_set_skew(q->timer, info->skew_value, in snd_seq_queue_timer_set_tempo()
468 queue_access_unlock(q); in snd_seq_queue_timer_set_tempo()
469 queuefree(q); in snd_seq_queue_timer_set_tempo()
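
snd_seq_queue_timer_set_tempo() follows the same lookup, access-lock, release sequence and then programs the timer with the new tempo, ppq and, when requested, skew. In the ALSA sequencer, tempo is given in microseconds per MIDI quarter note and ppq in ticks per quarter note, so the duration of one tick works out to roughly tempo * 1000 / ppq nanoseconds; the sketch below only does that arithmetic and is a simplification of what the timer code derives from the two values:

    /* Sketch only: nanoseconds per tick from MIDI tempo and ppq (simplified). */
    #include <stdio.h>

    /* tempo: microseconds per quarter note, ppq: ticks per quarter note */
    static unsigned long tick_resolution_ns(unsigned int tempo, unsigned int ppq)
    {
        return (unsigned long)tempo * 1000UL / ppq;
    }

    int main(void)
    {
        /* 120 BPM -> 500000 us per quarter note; 96 ticks per quarter note */
        printf("%lu ns per tick\n", tick_resolution_ns(500000, 96));
        /* prints 5208333, i.e. about 5.2 ms per sequencer tick */
        return 0;
    }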
484 if (use && queue->clients == 1) in queue_use()
513 * return 0 if not used, 1 if used.
517 struct snd_seq_queue *q; in snd_seq_queue_is_used() local
520 q = queueptr(queueid); in snd_seq_queue_is_used()
521 if (q == NULL) in snd_seq_queue_is_used()
523 result = test_bit(client, q->clients_bitmap) ? 1 : 0; in snd_seq_queue_is_used()
524 queuefree(q); in snd_seq_queue_is_used()
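
Queue usage is tracked per client in clients_bitmap plus a clients counter; the single line shown from queue_use() is the interesting edge, namely that the queue's timer is opened when the first user arrives (clients == 1) and, symmetrically, closed again when the last one leaves. snd_seq_queue_is_used() merely tests the client's bit while holding a reference. A userspace sketch of that first-user/last-user bookkeeping, with a plain bitmask instead of the kernel bitmap helpers:

    /* Sketch only: per-client use bits with first/last-user hooks. */
    #include <stdbool.h>
    #include <stdint.h>

    struct queue_use {
        uint64_t clients_bitmap;        /* one bit per client id, ids < 64 assumed */
        int clients;                    /* number of bits set */
    };

    static void timer_open(void)  { /* started for the first user */ }
    static void timer_close(void) { /* stopped when the last user leaves */ }

    static void queue_use(struct queue_use *u, int client, bool use)
    {
        uint64_t bit = UINT64_C(1) << client;

        if (use && !(u->clients_bitmap & bit)) {
            u->clients_bitmap |= bit;
            if (++u->clients == 1)
                timer_open();
        } else if (!use && (u->clients_bitmap & bit)) {
            u->clients_bitmap &= ~bit;
            if (--u->clients == 0)
                timer_close();
        }
    }

    static bool queue_is_used(const struct queue_use *u, int client)
    {
        return (u->clients_bitmap >> client) & 1;
    }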
538 struct snd_seq_queue *q; in snd_seq_queue_client_leave() local
542 q = queue_list_remove(i, client); in snd_seq_queue_client_leave()
543 if (q) in snd_seq_queue_client_leave()
544 queue_delete(q); in snd_seq_queue_client_leave()
551 q = queueptr(i); in snd_seq_queue_client_leave()
552 if (!q) in snd_seq_queue_client_leave()
554 if (test_bit(client, q->clients_bitmap)) { in snd_seq_queue_client_leave()
555 snd_seq_prioq_leave(q->tickq, client, 0); in snd_seq_queue_client_leave()
556 snd_seq_prioq_leave(q->timeq, client, 0); in snd_seq_queue_client_leave()
557 snd_seq_queue_use(q->queue, client, 0); in snd_seq_queue_client_leave()
559 queuefree(q); in snd_seq_queue_client_leave()
571 struct snd_seq_queue *q; in snd_seq_queue_client_leave_cells() local
574 q = queueptr(i); in snd_seq_queue_client_leave_cells()
575 if (!q) in snd_seq_queue_client_leave_cells()
577 snd_seq_prioq_leave(q->tickq, client, 0); in snd_seq_queue_client_leave_cells()
578 snd_seq_prioq_leave(q->timeq, client, 0); in snd_seq_queue_client_leave_cells()
579 queuefree(q); in snd_seq_queue_client_leave_cells()
587 struct snd_seq_queue *q; in snd_seq_queue_remove_cells() local
590 q = queueptr(i); in snd_seq_queue_remove_cells()
591 if (!q) in snd_seq_queue_remove_cells()
593 if (test_bit(client, q->clients_bitmap) && in snd_seq_queue_remove_cells()
595 q->queue == info->queue)) { in snd_seq_queue_remove_cells()
596 snd_seq_prioq_remove_events(q->tickq, client, info); in snd_seq_queue_remove_cells()
597 snd_seq_prioq_remove_events(q->timeq, client, info); in snd_seq_queue_remove_cells()
599 queuefree(q); in snd_seq_queue_remove_cells()
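
The three teardown helpers run when a client goes away or asks for its events to be flushed: snd_seq_queue_client_leave() deletes the queues the client owns, then purges the client's pending cells from every remaining queue's tick and time prioq and drops its use bit; snd_seq_queue_client_leave_cells() and snd_seq_queue_remove_cells() only do the purge, the latter optionally restricted to one queue. The core operation is removing every queued event whose source is the given client, sketched below as a filter over a singly linked event list (the real prioq also takes a timestamp flag, omitted here):

    /* Sketch only: drop all pending events that belong to one client. */
    #include <stdlib.h>

    struct cell {
        int source_client;
        struct cell *next;
    };

    struct prioq {
        struct cell *head;
    };

    static void prioq_leave(struct prioq *q, int client)
    {
        struct cell **link = &q->head;

        while (*link) {
            struct cell *c = *link;

            if (c->source_client == client) {
                *link = c->next;        /* unlink and free the cell */
                free(c);
            } else {
                link = &c->next;
            }
        }
    }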
608 static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev, in queue_broadcast_event() argument
616 sev.time.tick = q->timer->tick.cur_tick; in queue_broadcast_event()
617 sev.queue = q->queue; in queue_broadcast_event()
618 sev.data.queue.queue = q->queue; in queue_broadcast_event()
631 static void snd_seq_queue_process_event(struct snd_seq_queue *q, in snd_seq_queue_process_event() argument
637 snd_seq_prioq_leave(q->tickq, ev->source.client, 1); in snd_seq_queue_process_event()
638 snd_seq_prioq_leave(q->timeq, ev->source.client, 1); in snd_seq_queue_process_event()
639 if (! snd_seq_timer_start(q->timer)) in snd_seq_queue_process_event()
640 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
644 if (! snd_seq_timer_continue(q->timer)) in snd_seq_queue_process_event()
645 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
649 snd_seq_timer_stop(q->timer); in snd_seq_queue_process_event()
650 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
654 snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value); in snd_seq_queue_process_event()
655 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
659 if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) { in snd_seq_queue_process_event()
660 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
665 if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) { in snd_seq_queue_process_event()
666 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
670 if (snd_seq_timer_set_skew(q->timer, in snd_seq_queue_process_event()
673 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
686 struct snd_seq_queue *q; in snd_seq_control_queue() local
690 q = queueptr(ev->data.queue.queue); in snd_seq_control_queue()
692 if (q == NULL) in snd_seq_control_queue()
695 if (! queue_access_lock(q, ev->source.client)) { in snd_seq_control_queue()
696 queuefree(q); in snd_seq_control_queue()
700 snd_seq_queue_process_event(q, ev, atomic, hop); in snd_seq_control_queue()
702 queue_access_unlock(q); in snd_seq_control_queue()
703 queuefree(q); in snd_seq_control_queue()
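
snd_seq_control_queue() resolves the target queue from ev->data.queue.queue, takes the access lock on behalf of the source client, and hands the event to snd_seq_queue_process_event(), which is a switch over the control type: start (after flushing the sender's stale cells), continue, stop, tempo change, and tick/time/skew repositioning, each followed by queue_broadcast_event() only when the timer operation succeeded. A condensed sketch of that dispatch shape, with stand-in timer operations:

    /* Sketch only: control-event dispatch, notify only on success. */
    enum ctl { CTL_START, CTL_CONTINUE, CTL_STOP, CTL_TEMPO, CTL_SET_TICK };

    struct ctl_event { enum ctl type; int value; };

    /* stand-ins for the timer operations; 0 means success */
    static int timer_start(void)          { return 0; }
    static int timer_continue(void)       { return 0; }
    static int timer_stop(void)           { return 0; }
    static int timer_set_tempo(int tempo) { (void)tempo; return 0; }
    static int timer_set_tick(int tick)   { (void)tick; return 0; }

    static void broadcast(const struct ctl_event *ev) { (void)ev; /* notify subscribers */ }

    static void process_event(const struct ctl_event *ev)
    {
        switch (ev->type) {
        case CTL_START:
            /* the real code first purges the sender's stale cells */
            if (timer_start() == 0)
                broadcast(ev);
            break;
        case CTL_CONTINUE:
            if (timer_continue() == 0)
                broadcast(ev);
            break;
        case CTL_STOP:
            timer_stop();
            broadcast(ev);
            break;
        case CTL_TEMPO:
            timer_set_tempo(ev->value);
            broadcast(ev);
            break;
        case CTL_SET_TICK:
            if (timer_set_tick(ev->value) == 0)
                broadcast(ev);
            break;
        }
    }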
716 struct snd_seq_queue *q; in snd_seq_info_queues_read() local
722 q = queueptr(i); in snd_seq_info_queues_read()
723 if (!q) in snd_seq_info_queues_read()
726 tmr = q->timer; in snd_seq_info_queues_read()
732 scoped_guard(spinlock_irq, &q->owner_lock) { in snd_seq_info_queues_read()
733 locked = q->locked; in snd_seq_info_queues_read()
734 owner = q->owner; in snd_seq_info_queues_read()
737 snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name); in snd_seq_info_queues_read()
740 snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq)); in snd_seq_info_queues_read()
741 snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq)); in snd_seq_info_queues_read()
750 queuefree(q); in snd_seq_info_queues_read()
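
The /proc read walks all queues and prints their state; note that locked and owner are copied out inside the scoped_guard(spinlock_irq, &q->owner_lock) block first, so the printed values are a consistent snapshot rather than fields read while another context may be changing them. The same snapshot-then-format idea in a short userspace sketch:

    /* Sketch only: snapshot shared fields under the lock, format afterwards. */
    #include <pthread.h>
    #include <stdio.h>

    struct queue_info {
        pthread_mutex_t owner_lock;
        int owner;
        int locked;
    };

    static void print_queue_info(struct queue_info *q)
    {
        int owner, locked;

        pthread_mutex_lock(&q->owner_lock);
        owner = q->owner;               /* copy while holding the lock */
        locked = q->locked;
        pthread_mutex_unlock(&q->owner_lock);

        printf("owned by client %d (%s)\n", owner, locked ? "locked" : "free");
    }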