Lines Matching +full:comp +full:- +full:int

Matched lines from core.c, the core module of the MOST Linux driver stack. Each entry gives the source line number, the matched line and, where known, the containing function; "member", "local" and "argument" tag how the searched identifier is used on that line.

1 // SPDX-License-Identifier: GPL-2.0
3 * core.c - Implementation of core module of MOST Linux driver stack
5 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
21 #include <linux/dma-mapping.h>
29 static int dummy_num_buffers;
33 struct most_component *comp; member
34 int refs;
35 int num_buffers;
48 int is_starving;
68 int dev_id;
75 int most_ch_data_type;
86 * list_pop_mbo - retrieves the first MBO of the list and removes it
92 list_del(&_mbo->list); \
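Only the list_del() line of the macro body matched the query; a sketch of the complete statement-expression macro, with the surrounding lines assumed from the visible fragment, is:

	#define list_pop_mbo(ptr)						\
	({									\
		/* assumed: fetch the head entry before unlinking it */		\
		struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
		list_del(&_mbo->list);						\
		_mbo;	/* assumed: the popped MBO is the macro's value */	\
	})
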
97 * most_free_mbo_coherent - free an MBO and its coherent buffer
102 struct most_channel *c = mbo->context; in most_free_mbo_coherent()
103 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len; in most_free_mbo_coherent()
105 if (c->iface->dma_free) in most_free_mbo_coherent()
106 c->iface->dma_free(mbo, coherent_buf_size); in most_free_mbo_coherent()
108 kfree(mbo->virt_address); in most_free_mbo_coherent()
110 if (atomic_sub_and_test(1, &c->mbo_ref)) in most_free_mbo_coherent()
111 complete(&c->cleanup); in most_free_mbo_coherent()
115 * flush_channel_fifos - clear the channel fifos
123 if (list_empty(&c->fifo) && list_empty(&c->halt_fifo)) in flush_channel_fifos()
126 spin_lock_irqsave(&c->fifo_lock, flags); in flush_channel_fifos()
127 list_for_each_entry_safe(mbo, tmp, &c->fifo, list) { in flush_channel_fifos()
128 list_del(&mbo->list); in flush_channel_fifos()
129 spin_unlock_irqrestore(&c->fifo_lock, flags); in flush_channel_fifos()
131 spin_lock_irqsave(&c->fifo_lock, flags); in flush_channel_fifos()
133 spin_unlock_irqrestore(&c->fifo_lock, flags); in flush_channel_fifos()
135 spin_lock_irqsave(&c->fifo_lock, hf_flags); in flush_channel_fifos()
136 list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) { in flush_channel_fifos()
137 list_del(&mbo->list); in flush_channel_fifos()
138 spin_unlock_irqrestore(&c->fifo_lock, hf_flags); in flush_channel_fifos()
140 spin_lock_irqsave(&c->fifo_lock, hf_flags); in flush_channel_fifos()
142 spin_unlock_irqrestore(&c->fifo_lock, hf_flags); in flush_channel_fifos()
144 if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))) in flush_channel_fifos()
145 dev_warn(&c->dev, "Channel or trash fifo not empty\n"); in flush_channel_fifos()
149 * flush_trash_fifo - clear the trash fifo
152 static int flush_trash_fifo(struct most_channel *c) in flush_trash_fifo()
157 spin_lock_irqsave(&c->fifo_lock, flags); in flush_trash_fifo()
158 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) { in flush_trash_fifo()
159 list_del(&mbo->list); in flush_trash_fifo()
160 spin_unlock_irqrestore(&c->fifo_lock, flags); in flush_trash_fifo()
162 spin_lock_irqsave(&c->fifo_lock, flags); in flush_trash_fifo()
164 spin_unlock_irqrestore(&c->fifo_lock, flags); in flush_trash_fifo()
173 unsigned int i = c->channel_id; in available_directions_show()
176 if (c->iface->channel_vector[i].direction & MOST_CH_RX) in available_directions_show()
178 if (c->iface->channel_vector[i].direction & MOST_CH_TX) in available_directions_show()
189 unsigned int i = c->channel_id; in available_datatypes_show()
192 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL) in available_datatypes_show()
194 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC) in available_datatypes_show()
196 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC) in available_datatypes_show()
198 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC) in available_datatypes_show()
209 unsigned int i = c->channel_id; in number_of_packet_buffers_show()
212 c->iface->channel_vector[i].num_buffers_packet); in number_of_packet_buffers_show()
220 unsigned int i = c->channel_id; in number_of_stream_buffers_show()
223 c->iface->channel_vector[i].num_buffers_streaming); in number_of_stream_buffers_show()
231 unsigned int i = c->channel_id; in size_of_packet_buffer_show()
234 c->iface->channel_vector[i].buffer_size_packet); in size_of_packet_buffer_show()
242 unsigned int i = c->channel_id; in size_of_stream_buffer_show()
245 c->iface->channel_vector[i].buffer_size_streaming); in size_of_stream_buffer_show()
254 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving); in channel_starving_show()
263 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers); in set_number_of_buffers_show()
272 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size); in set_buffer_size_show()
281 if (c->cfg.direction & MOST_CH_TX) in set_direction_show()
283 else if (c->cfg.direction & MOST_CH_RX) in set_direction_show()
292 int i; in set_datatype_show()
296 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) in set_datatype_show()
309 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size); in set_subbuffer_size_show()
318 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact); in set_packets_per_xact_show()
326 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size); in set_dbr_size_show()
331 struct attribute *attr, int index) in channel_attr_is_visible()
337 if (!strcmp(dev_attr->attr.name, "set_dbr_size") && in channel_attr_is_visible()
338 (c->iface->interface != ITYPE_MEDIALB_DIM2)) in channel_attr_is_visible()
340 if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") && in channel_attr_is_visible()
341 (c->iface->interface != ITYPE_USB)) in channel_attr_is_visible()
344 return attr->mode; in channel_attr_is_visible()
398 return snprintf(buf, PAGE_SIZE, "%s\n", iface->description); in description_show()
407 switch (iface->interface) { in interface_show()
450 struct most_component *comp; in match_component() local
452 list_for_each_entry(comp, &comp_list, list) { in match_component()
453 if (!strcmp(comp->name, name)) in match_component()
454 return comp; in match_component()
460 int offs;
464 static int print_links(struct device *dev, void *data) in print_links()
467 int offs = d->offs; in print_links()
468 char *buf = d->buf; in print_links()
472 list_for_each_entry(c, &iface->p->channel_list, list) { in print_links()
473 if (c->pipe0.comp) { in print_links()
475 PAGE_SIZE - offs, in print_links()
477 c->pipe0.comp->name, in print_links()
478 dev_name(iface->dev), in print_links()
479 dev_name(&c->dev)); in print_links()
481 if (c->pipe1.comp) { in print_links()
483 PAGE_SIZE - offs, in print_links()
485 c->pipe1.comp->name, in print_links()
486 dev_name(iface->dev), in print_links()
487 dev_name(&c->dev)); in print_links()
490 d->offs = offs; in print_links()
494 static int most_match(struct device *dev, const struct device_driver *drv) in most_match()
517 struct most_component *comp; in components_show() local
518 int offs = 0; in components_show()
520 list_for_each_entry(comp, &comp_list, list) { in components_show()
521 offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n", in components_show()
522 comp->name); in components_show()
528 * get_channel - get pointer to channel
543 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { in get_channel()
544 if (!strcmp(dev_name(&c->dev), mdev_ch)) in get_channel()
551 inline int link_channel_to_component(struct most_channel *c, in link_channel_to_component()
552 struct most_component *comp, in link_channel_to_component() argument
556 int ret; in link_channel_to_component()
559 if (!c->pipe0.comp) in link_channel_to_component()
560 comp_ptr = &c->pipe0.comp; in link_channel_to_component()
561 else if (!c->pipe1.comp) in link_channel_to_component()
562 comp_ptr = &c->pipe1.comp; in link_channel_to_component()
564 return -ENOSPC; in link_channel_to_component()
566 *comp_ptr = comp; in link_channel_to_component()
567 ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name, in link_channel_to_component()
576 int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val) in most_set_cfg_buffer_size()
581 return -ENODEV; in most_set_cfg_buffer_size()
582 c->cfg.buffer_size = val; in most_set_cfg_buffer_size()
586 int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val) in most_set_cfg_subbuffer_size()
591 return -ENODEV; in most_set_cfg_subbuffer_size()
592 c->cfg.subbuffer_size = val; in most_set_cfg_subbuffer_size()
596 int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val) in most_set_cfg_dbr_size()
601 return -ENODEV; in most_set_cfg_dbr_size()
602 c->cfg.dbr_size = val; in most_set_cfg_dbr_size()
606 int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val) in most_set_cfg_num_buffers()
611 return -ENODEV; in most_set_cfg_num_buffers()
612 c->cfg.num_buffers = val; in most_set_cfg_num_buffers()
616 int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf) in most_set_cfg_datatype()
618 int i; in most_set_cfg_datatype()
622 return -ENODEV; in most_set_cfg_datatype()
625 c->cfg.data_type = ch_data_type[i].most_ch_data_type; in most_set_cfg_datatype()
631 dev_warn(&c->dev, "Invalid attribute settings\n"); in most_set_cfg_datatype()
635 int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf) in most_set_cfg_direction()
640 return -ENODEV; in most_set_cfg_direction()
642 c->cfg.direction = MOST_CH_RX; in most_set_cfg_direction()
644 c->cfg.direction = MOST_CH_RX; in most_set_cfg_direction()
646 c->cfg.direction = MOST_CH_TX; in most_set_cfg_direction()
648 c->cfg.direction = MOST_CH_TX; in most_set_cfg_direction()
650 dev_err(&c->dev, "Invalid direction\n"); in most_set_cfg_direction()
651 return -ENODATA; in most_set_cfg_direction()
656 int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val) in most_set_cfg_packets_xact()
661 return -ENODEV; in most_set_cfg_packets_xact()
662 c->cfg.packets_per_xact = val; in most_set_cfg_packets_xact()
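All of the most_set_cfg_*() setters above share one shape: look the channel up by device and channel name, bail out if it does not exist, then assign the field. A sketch of one setter with the elided lines filled in by assumption:

	int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
	{
		struct most_channel *c = get_channel(mdev, mdev_ch);	/* assumed lookup */

		if (!c)
			return -ENODEV;
		c->cfg.buffer_size = val;
		return 0;	/* assumed success return */
	}
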
666 int most_cfg_complete(char *comp_name) in most_cfg_complete()
668 struct most_component *comp; in most_cfg_complete() local
670 comp = match_component(comp_name); in most_cfg_complete()
671 if (!comp) in most_cfg_complete()
672 return -ENODEV; in most_cfg_complete()
674 return comp->cfg_complete(); in most_cfg_complete()
677 int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name, in most_add_link()
681 struct most_component *comp = match_component(comp_name); in most_add_link() local
683 if (!c || !comp) in most_add_link()
684 return -ENODEV; in most_add_link()
686 return link_channel_to_component(c, comp, link_name, comp_param); in most_add_link()
689 int most_remove_link(char *mdev, char *mdev_ch, char *comp_name) in most_remove_link()
692 struct most_component *comp; in most_remove_link() local
694 comp = match_component(comp_name); in most_remove_link()
695 if (!comp) in most_remove_link()
696 return -ENODEV; in most_remove_link()
699 return -ENODEV; in most_remove_link()
701 if (comp->disconnect_channel(c->iface, c->channel_id)) in most_remove_link()
702 return -EIO; in most_remove_link()
703 if (c->pipe0.comp == comp) in most_remove_link()
704 c->pipe0.comp = NULL; in most_remove_link()
705 if (c->pipe1.comp == comp) in most_remove_link()
706 c->pipe1.comp = NULL; in most_remove_link()
739 struct most_channel *c = mbo->context; in trash_mbo()
741 spin_lock_irqsave(&c->fifo_lock, flags); in trash_mbo()
742 list_add(&mbo->list, &c->trash_fifo); in trash_mbo()
743 spin_unlock_irqrestore(&c->fifo_lock, flags); in trash_mbo()
750 if (c->enqueue_halt) in hdm_mbo_ready()
753 spin_lock_irq(&c->fifo_lock); in hdm_mbo_ready()
754 empty = list_empty(&c->halt_fifo); in hdm_mbo_ready()
755 spin_unlock_irq(&c->fifo_lock); in hdm_mbo_ready()
763 struct most_channel *c = mbo->context; in nq_hdm_mbo()
765 spin_lock_irqsave(&c->fifo_lock, flags); in nq_hdm_mbo()
766 list_add_tail(&mbo->list, &c->halt_fifo); in nq_hdm_mbo()
767 spin_unlock_irqrestore(&c->fifo_lock, flags); in nq_hdm_mbo()
768 wake_up_interruptible(&c->hdm_fifo_wq); in nq_hdm_mbo()
771 static int hdm_enqueue_thread(void *data) in hdm_enqueue_thread()
775 int ret; in hdm_enqueue_thread()
776 typeof(c->iface->enqueue) enqueue = c->iface->enqueue; in hdm_enqueue_thread()
779 wait_event_interruptible(c->hdm_fifo_wq, in hdm_enqueue_thread()
783 mutex_lock(&c->nq_mutex); in hdm_enqueue_thread()
784 spin_lock_irq(&c->fifo_lock); in hdm_enqueue_thread()
785 if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) { in hdm_enqueue_thread()
786 spin_unlock_irq(&c->fifo_lock); in hdm_enqueue_thread()
787 mutex_unlock(&c->nq_mutex); in hdm_enqueue_thread()
791 mbo = list_pop_mbo(&c->halt_fifo); in hdm_enqueue_thread()
792 spin_unlock_irq(&c->fifo_lock); in hdm_enqueue_thread()
794 if (c->cfg.direction == MOST_CH_RX) in hdm_enqueue_thread()
795 mbo->buffer_length = c->cfg.buffer_size; in hdm_enqueue_thread()
797 ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo); in hdm_enqueue_thread()
798 mutex_unlock(&c->nq_mutex); in hdm_enqueue_thread()
801 dev_err(&c->dev, "Buffer enqueue failed\n"); in hdm_enqueue_thread()
803 c->hdm_enqueue_task = NULL; in hdm_enqueue_thread()
811 static int run_enqueue_thread(struct most_channel *c, int channel_id) in run_enqueue_thread()
820 c->hdm_enqueue_task = task; in run_enqueue_thread()
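Only the signature and the task assignment of run_enqueue_thread() matched; a sketch of the whole helper, assuming a standard kthread_run() launch (the thread name format is a guess):

	static int run_enqueue_thread(struct most_channel *c, int channel_id)
	{
		struct task_struct *task =
			kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
				    channel_id);	/* name format assumed */

		if (IS_ERR(task))
			return PTR_ERR(task);

		c->hdm_enqueue_task = task;
		return 0;
	}
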
825 * arm_mbo - recycle MBO for further usage
840 c = mbo->context; in arm_mbo()
842 if (c->is_poisoned) { in arm_mbo()
847 spin_lock_irqsave(&c->fifo_lock, flags); in arm_mbo()
848 ++*mbo->num_buffers_ptr; in arm_mbo()
849 list_add_tail(&mbo->list, &c->fifo); in arm_mbo()
850 spin_unlock_irqrestore(&c->fifo_lock, flags); in arm_mbo()
852 if (c->pipe0.refs && c->pipe0.comp->tx_completion) in arm_mbo()
853 c->pipe0.comp->tx_completion(c->iface, c->channel_id); in arm_mbo()
855 if (c->pipe1.refs && c->pipe1.comp->tx_completion) in arm_mbo()
856 c->pipe1.comp->tx_completion(c->iface, c->channel_id); in arm_mbo()
860 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
872 static int arm_mbo_chain(struct most_channel *c, int dir, in arm_mbo_chain()
875 unsigned int i; in arm_mbo_chain()
878 u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len; in arm_mbo_chain()
880 atomic_set(&c->mbo_nq_level, 0); in arm_mbo_chain()
882 for (i = 0; i < c->cfg.num_buffers; i++) { in arm_mbo_chain()
887 mbo->context = c; in arm_mbo_chain()
888 mbo->ifp = c->iface; in arm_mbo_chain()
889 mbo->hdm_channel_id = c->channel_id; in arm_mbo_chain()
890 if (c->iface->dma_alloc) { in arm_mbo_chain()
891 mbo->virt_address = in arm_mbo_chain()
892 c->iface->dma_alloc(mbo, coherent_buf_size); in arm_mbo_chain()
894 mbo->virt_address = in arm_mbo_chain()
897 if (!mbo->virt_address) in arm_mbo_chain()
900 mbo->complete = compl; in arm_mbo_chain()
901 mbo->num_buffers_ptr = &dummy_num_buffers; in arm_mbo_chain()
904 atomic_inc(&c->mbo_nq_level); in arm_mbo_chain()
906 spin_lock_irqsave(&c->fifo_lock, flags); in arm_mbo_chain()
907 list_add_tail(&mbo->list, &c->fifo); in arm_mbo_chain()
908 spin_unlock_irqrestore(&c->fifo_lock, flags); in arm_mbo_chain()
911 return c->cfg.num_buffers; in arm_mbo_chain()
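Inside the allocation loop, the matched fragments at source lines 890-897 imply a DMA-capable allocation with a plain-kernel-memory fallback; a sketch of that branch (the kzalloc() call and the cleanup label are assumptions):

	if (c->iface->dma_alloc)
		mbo->virt_address = c->iface->dma_alloc(mbo, coherent_buf_size);
	else
		mbo->virt_address = kzalloc(coherent_buf_size, GFP_KERNEL);	/* assumed */
	if (!mbo->virt_address)
		goto release_mbo;	/* hypothetical cleanup label */
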
922 * most_submit_mbo - submits an MBO to fifo
927 if (WARN_ONCE(!mbo || !mbo->context, in most_submit_mbo()
936 * most_write_completion - write completion handler
946 c = mbo->context; in most_write_completion()
947 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) in most_write_completion()
953 int channel_has_mbo(struct most_interface *iface, int id, in channel_has_mbo()
954 struct most_component *comp) in channel_has_mbo() argument
956 struct most_channel *c = iface->p->channel[id]; in channel_has_mbo()
958 int empty; in channel_has_mbo()
961 return -EINVAL; in channel_has_mbo()
963 if (c->pipe0.refs && c->pipe1.refs && in channel_has_mbo()
964 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) || in channel_has_mbo()
965 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0))) in channel_has_mbo()
968 spin_lock_irqsave(&c->fifo_lock, flags); in channel_has_mbo()
969 empty = list_empty(&c->fifo); in channel_has_mbo()
970 spin_unlock_irqrestore(&c->fifo_lock, flags); in channel_has_mbo()
976 * most_get_mbo - get pointer to an MBO of pool
979 * @comp: driver component
984 struct mbo *most_get_mbo(struct most_interface *iface, int id, in most_get_mbo()
985 struct most_component *comp) in most_get_mbo() argument
990 int *num_buffers_ptr; in most_get_mbo()
992 c = iface->p->channel[id]; in most_get_mbo()
996 if (c->pipe0.refs && c->pipe1.refs && in most_get_mbo()
997 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) || in most_get_mbo()
998 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0))) in most_get_mbo()
1001 if (comp == c->pipe0.comp) in most_get_mbo()
1002 num_buffers_ptr = &c->pipe0.num_buffers; in most_get_mbo()
1003 else if (comp == c->pipe1.comp) in most_get_mbo()
1004 num_buffers_ptr = &c->pipe1.num_buffers; in most_get_mbo()
1008 spin_lock_irqsave(&c->fifo_lock, flags); in most_get_mbo()
1009 if (list_empty(&c->fifo)) { in most_get_mbo()
1010 spin_unlock_irqrestore(&c->fifo_lock, flags); in most_get_mbo()
1013 mbo = list_pop_mbo(&c->fifo); in most_get_mbo()
1014 --*num_buffers_ptr; in most_get_mbo()
1015 spin_unlock_irqrestore(&c->fifo_lock, flags); in most_get_mbo()
1017 mbo->num_buffers_ptr = num_buffers_ptr; in most_get_mbo()
1018 mbo->buffer_length = c->cfg.buffer_size; in most_get_mbo()
1024 * most_put_mbo - return buffer to pool
1029 struct most_channel *c = mbo->context; in most_put_mbo()
1031 if (c->cfg.direction == MOST_CH_TX) { in most_put_mbo()
1036 atomic_inc(&c->mbo_nq_level); in most_put_mbo()
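most_get_mbo(), most_submit_mbo() and most_put_mbo() together form the buffer-handling contract for a component's TX path; a hypothetical caller (my_comp_send() and its parameters are invented for illustration) might look like:

	static int my_comp_send(struct most_interface *iface, int id,
				struct most_component *comp,
				const void *data, unsigned int len)
	{
		struct mbo *mbo = most_get_mbo(iface, id, comp);

		if (!mbo)
			return -EAGAIN;		/* pool empty or pipe quota used up */
		if (len > mbo->buffer_length) {
			most_put_mbo(mbo);	/* hand the buffer back unused */
			return -EMSGSIZE;
		}
		memcpy(mbo->virt_address, data, len);
		mbo->buffer_length = len;
		most_submit_mbo(mbo);		/* queue for the HDM to transmit */
		return 0;
	}
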
1041 * most_read_completion - read completion handler
1052 struct most_channel *c = mbo->context; in most_read_completion()
1054 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) { in most_read_completion()
1059 if (mbo->status == MBO_E_INVAL) { in most_read_completion()
1061 atomic_inc(&c->mbo_nq_level); in most_read_completion()
1065 if (atomic_sub_and_test(1, &c->mbo_nq_level)) in most_read_completion()
1066 c->is_starving = 1; in most_read_completion()
1068 if (c->pipe0.refs && c->pipe0.comp->rx_completion && in most_read_completion()
1069 c->pipe0.comp->rx_completion(mbo) == 0) in most_read_completion()
1072 if (c->pipe1.refs && c->pipe1.comp->rx_completion && in most_read_completion()
1073 c->pipe1.comp->rx_completion(mbo) == 0) in most_read_completion()
1080 * most_start_channel - prepares a channel for communication
1083 * @comp: driver component
1085 * This prepares the channel for usage. Cross-checks whether the
1090 int most_start_channel(struct most_interface *iface, int id, in most_start_channel()
1091 struct most_component *comp) in most_start_channel() argument
1093 int num_buffer; in most_start_channel()
1094 int ret; in most_start_channel()
1095 struct most_channel *c = iface->p->channel[id]; in most_start_channel()
1098 return -EINVAL; in most_start_channel()
1100 mutex_lock(&c->start_mutex); in most_start_channel()
1101 if (c->pipe0.refs + c->pipe1.refs > 0) in most_start_channel()
1104 if (!try_module_get(iface->mod)) { in most_start_channel()
1105 dev_err(&c->dev, "Failed to acquire HDM lock\n"); in most_start_channel()
1106 mutex_unlock(&c->start_mutex); in most_start_channel()
1107 return -ENOLCK; in most_start_channel()
1110 c->cfg.extra_len = 0; in most_start_channel()
1111 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) { in most_start_channel()
1112 dev_err(&c->dev, "Channel configuration failed. Go check settings...\n"); in most_start_channel()
1113 ret = -EINVAL; in most_start_channel()
1117 init_waitqueue_head(&c->hdm_fifo_wq); in most_start_channel()
1119 if (c->cfg.direction == MOST_CH_RX) in most_start_channel()
1120 num_buffer = arm_mbo_chain(c, c->cfg.direction, in most_start_channel()
1123 num_buffer = arm_mbo_chain(c, c->cfg.direction, in most_start_channel()
1126 ret = -ENOMEM; in most_start_channel()
1134 c->is_starving = 0; in most_start_channel()
1135 c->pipe0.num_buffers = c->cfg.num_buffers / 2; in most_start_channel()
1136 c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers; in most_start_channel()
1137 atomic_set(&c->mbo_ref, num_buffer); in most_start_channel()
1140 if (comp == c->pipe0.comp) in most_start_channel()
1141 c->pipe0.refs++; in most_start_channel()
1142 if (comp == c->pipe1.comp) in most_start_channel()
1143 c->pipe1.refs++; in most_start_channel()
1144 mutex_unlock(&c->start_mutex); in most_start_channel()
1148 module_put(iface->mod); in most_start_channel()
1149 mutex_unlock(&c->start_mutex); in most_start_channel()
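From a component's point of view, most_start_channel() is the open-path entry point; a hypothetical open handler (names invented) could be as small as:

	static int my_comp_open(struct most_interface *iface, int id,
				struct most_component *comp)
	{
		int ret = most_start_channel(iface, id, comp);

		if (ret)	/* e.g. -EINVAL if the channel is misconfigured */
			return ret;
		/* channel configured, MBOs armed, enqueue thread running */
		return 0;
	}
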
1155 * most_stop_channel - stops a running channel
1158 * @comp: driver component
1160 int most_stop_channel(struct most_interface *iface, int id, in most_stop_channel()
1161 struct most_component *comp) in most_stop_channel() argument
1165 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) { in most_stop_channel()
1167 return -EINVAL; in most_stop_channel()
1169 c = iface->p->channel[id]; in most_stop_channel()
1171 return -EINVAL; in most_stop_channel()
1173 mutex_lock(&c->start_mutex); in most_stop_channel()
1174 if (c->pipe0.refs + c->pipe1.refs >= 2) in most_stop_channel()
1177 if (c->hdm_enqueue_task) in most_stop_channel()
1178 kthread_stop(c->hdm_enqueue_task); in most_stop_channel()
1179 c->hdm_enqueue_task = NULL; in most_stop_channel()
1181 if (iface->mod) in most_stop_channel()
1182 module_put(iface->mod); in most_stop_channel()
1184 c->is_poisoned = true; in most_stop_channel()
1185 if (c->iface->poison_channel(c->iface, c->channel_id)) { in most_stop_channel()
1186 dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id, in most_stop_channel()
1187 c->iface->description); in most_stop_channel()
1188 mutex_unlock(&c->start_mutex); in most_stop_channel()
1189 return -EAGAIN; in most_stop_channel()
1195 if (wait_for_completion_interruptible(&c->cleanup)) { in most_stop_channel()
1196 dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id); in most_stop_channel()
1197 mutex_unlock(&c->start_mutex); in most_stop_channel()
1198 return -EINTR; in most_stop_channel()
1201 wait_for_completion(&c->cleanup); in most_stop_channel()
1203 c->is_poisoned = false; in most_stop_channel()
1206 if (comp == c->pipe0.comp) in most_stop_channel()
1207 c->pipe0.refs--; in most_stop_channel()
1208 if (comp == c->pipe1.comp) in most_stop_channel()
1209 c->pipe1.refs--; in most_stop_channel()
1210 mutex_unlock(&c->start_mutex); in most_stop_channel()
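The matching close path drops the component's reference again; a minimal hypothetical counterpart to the open sketch above:

	static void my_comp_close(struct most_interface *iface, int id,
				  struct most_component *comp)
	{
		if (most_stop_channel(iface, id, comp))
			pr_warn("my_comp: could not stop channel %d\n", id);
	}
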
1216 * most_register_component - registers a driver component with the core
1217 * @comp: driver component
1219 int most_register_component(struct most_component *comp) in most_register_component() argument
1221 if (!comp) { in most_register_component()
1223 return -EINVAL; in most_register_component()
1225 list_add_tail(&comp->list, &comp_list); in most_register_component()
1230 static int disconnect_channels(struct device *dev, void *data) in disconnect_channels()
1234 struct most_component *comp = data; in disconnect_channels() local
1237 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { in disconnect_channels()
1238 if (c->pipe0.comp == comp || c->pipe1.comp == comp) in disconnect_channels()
1239 comp->disconnect_channel(c->iface, c->channel_id); in disconnect_channels()
1240 if (c->pipe0.comp == comp) in disconnect_channels()
1241 c->pipe0.comp = NULL; in disconnect_channels()
1242 if (c->pipe1.comp == comp) in disconnect_channels()
1243 c->pipe1.comp = NULL; in disconnect_channels()
1249 * most_deregister_component - deregisters a driver component with the core
1250 * @comp: driver component
1252 int most_deregister_component(struct most_component *comp) in most_deregister_component() argument
1254 if (!comp) { in most_deregister_component()
1256 return -EINVAL; in most_deregister_component()
1259 bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels); in most_deregister_component()
1260 list_del(&comp->list); in most_deregister_component()
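A component registers itself once at module load and unregisters at unload; a hypothetical skeleton (callback names invented; the .mod member and exact field set are assumptions based on the callbacks this listing references):

	static struct most_component my_comp = {
		.mod		    = THIS_MODULE,	/* assumed member */
		.name		    = "my_comp",
		.probe_channel	    = my_probe_channel,	/* called via link_channel_to_component() */
		.disconnect_channel = my_disconnect_channel,
		.rx_completion	    = my_rx_completion,
		.tx_completion	    = my_tx_completion,
	};

	static int __init my_comp_init(void)
	{
		return most_register_component(&my_comp);
	}

	static void __exit my_comp_exit(void)
	{
		most_deregister_component(&my_comp);	/* also unlinks all channels */
	}

	module_init(my_comp_init);
	module_exit(my_comp_exit);
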
1273 * most_register_interface - registers an interface with core
1279 int most_register_interface(struct most_interface *iface) in most_register_interface()
1281 unsigned int i; in most_register_interface()
1282 int id; in most_register_interface()
1285 if (!iface || !iface->enqueue || !iface->configure || in most_register_interface()
1286 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) in most_register_interface()
1287 return -EINVAL; in most_register_interface()
1291 dev_err(iface->dev, "Failed to allocate device ID\n"); in most_register_interface()
1295 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL); in most_register_interface()
1296 if (!iface->p) { in most_register_interface()
1298 return -ENOMEM; in most_register_interface()
1301 INIT_LIST_HEAD(&iface->p->channel_list); in most_register_interface()
1302 iface->p->dev_id = id; in most_register_interface()
1303 strscpy(iface->p->name, iface->description, sizeof(iface->p->name)); in most_register_interface()
1304 iface->dev->bus = &mostbus; in most_register_interface()
1305 iface->dev->groups = interface_attr_groups; in most_register_interface()
1306 dev_set_drvdata(iface->dev, iface); in most_register_interface()
1307 if (device_register(iface->dev)) { in most_register_interface()
1308 dev_err(iface->dev, "Failed to register interface device\n"); in most_register_interface()
1309 kfree(iface->p); in most_register_interface()
1310 put_device(iface->dev); in most_register_interface()
1312 return -ENOMEM; in most_register_interface()
1315 for (i = 0; i < iface->num_channels; i++) { in most_register_interface()
1316 const char *name_suffix = iface->channel_vector[i].name_suffix; in most_register_interface()
1322 snprintf(c->name, STRING_SIZE, "ch%d", i); in most_register_interface()
1324 snprintf(c->name, STRING_SIZE, "%s", name_suffix); in most_register_interface()
1325 c->dev.init_name = c->name; in most_register_interface()
1326 c->dev.parent = iface->dev; in most_register_interface()
1327 c->dev.groups = channel_attr_groups; in most_register_interface()
1328 c->dev.release = release_channel; in most_register_interface()
1329 iface->p->channel[i] = c; in most_register_interface()
1330 c->is_starving = 0; in most_register_interface()
1331 c->iface = iface; in most_register_interface()
1332 c->channel_id = i; in most_register_interface()
1333 c->keep_mbo = false; in most_register_interface()
1334 c->enqueue_halt = false; in most_register_interface()
1335 c->is_poisoned = false; in most_register_interface()
1336 c->cfg.direction = 0; in most_register_interface()
1337 c->cfg.data_type = 0; in most_register_interface()
1338 c->cfg.num_buffers = 0; in most_register_interface()
1339 c->cfg.buffer_size = 0; in most_register_interface()
1340 c->cfg.subbuffer_size = 0; in most_register_interface()
1341 c->cfg.packets_per_xact = 0; in most_register_interface()
1342 spin_lock_init(&c->fifo_lock); in most_register_interface()
1343 INIT_LIST_HEAD(&c->fifo); in most_register_interface()
1344 INIT_LIST_HEAD(&c->trash_fifo); in most_register_interface()
1345 INIT_LIST_HEAD(&c->halt_fifo); in most_register_interface()
1346 init_completion(&c->cleanup); in most_register_interface()
1347 atomic_set(&c->mbo_ref, 0); in most_register_interface()
1348 mutex_init(&c->start_mutex); in most_register_interface()
1349 mutex_init(&c->nq_mutex); in most_register_interface()
1350 list_add_tail(&c->list, &iface->p->channel_list); in most_register_interface()
1351 if (device_register(&c->dev)) { in most_register_interface()
1352 dev_err(&c->dev, "Failed to register channel device\n"); in most_register_interface()
1356 most_interface_register_notify(iface->description); in most_register_interface()
1360 put_device(&c->dev); in most_register_interface()
1364 c = iface->p->channel[--i]; in most_register_interface()
1365 device_unregister(&c->dev); in most_register_interface()
1367 kfree(iface->p); in most_register_interface()
1368 device_unregister(iface->dev); in most_register_interface()
1370 return -ENOMEM; in most_register_interface()
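On the HDM side, most_register_interface() requires enqueue(), configure() and poison_channel() to be populated (see the sanity check at source line 1285); a hypothetical registration sketch that fills in only members this listing references (struct my_hdm and all my_* callbacks are placeholders):

	static int my_hdm_probe(struct my_hdm *dev)
	{
		struct most_interface *iface = &dev->iface;

		iface->dev	      = dev->parent_dev;	/* parent struct device */
		iface->mod	      = THIS_MODULE;
		iface->interface      = ITYPE_USB;		/* or ITYPE_MEDIALB_DIM2, ... */
		iface->description    = "my-hdm";
		iface->num_channels   = 2;
		iface->channel_vector = dev->caps;		/* per-channel capabilities */
		iface->enqueue	      = my_enqueue;
		iface->configure      = my_configure;
		iface->poison_channel = my_poison;

		return most_register_interface(iface);	/* creates one device per channel */
	}
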
1375 * most_deregister_interface - deregisters an interface with core
1383 int i; in most_deregister_interface()
1386 for (i = 0; i < iface->num_channels; i++) { in most_deregister_interface()
1387 c = iface->p->channel[i]; in most_deregister_interface()
1388 if (c->pipe0.comp) in most_deregister_interface()
1389 c->pipe0.comp->disconnect_channel(c->iface, in most_deregister_interface()
1390 c->channel_id); in most_deregister_interface()
1391 if (c->pipe1.comp) in most_deregister_interface()
1392 c->pipe1.comp->disconnect_channel(c->iface, in most_deregister_interface()
1393 c->channel_id); in most_deregister_interface()
1394 c->pipe0.comp = NULL; in most_deregister_interface()
1395 c->pipe1.comp = NULL; in most_deregister_interface()
1396 list_del(&c->list); in most_deregister_interface()
1397 device_unregister(&c->dev); in most_deregister_interface()
1400 ida_free(&mdev_id, iface->p->dev_id); in most_deregister_interface()
1401 kfree(iface->p); in most_deregister_interface()
1402 device_unregister(iface->dev); in most_deregister_interface()
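Teardown mirrors this: a hypothetical remove path just hands the interface back to the core, which disconnects both pipes of every channel first (source lines 1386-1397):

	static void my_hdm_remove(struct my_hdm *dev)
	{
		most_deregister_interface(&dev->iface);
	}
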
1407 * most_stop_enqueue - prevents core from enqueueing MBOs
1416 void most_stop_enqueue(struct most_interface *iface, int id) in most_stop_enqueue()
1418 struct most_channel *c = iface->p->channel[id]; in most_stop_enqueue()
1423 mutex_lock(&c->nq_mutex); in most_stop_enqueue()
1424 c->enqueue_halt = true; in most_stop_enqueue()
1425 mutex_unlock(&c->nq_mutex); in most_stop_enqueue()
1430 * most_resume_enqueue - allow core to enqueue MBOs again
1437 void most_resume_enqueue(struct most_interface *iface, int id) in most_resume_enqueue()
1439 struct most_channel *c = iface->p->channel[id]; in most_resume_enqueue()
1444 mutex_lock(&c->nq_mutex); in most_resume_enqueue()
1445 c->enqueue_halt = false; in most_resume_enqueue()
1446 mutex_unlock(&c->nq_mutex); in most_resume_enqueue()
1448 wake_up_interruptible(&c->hdm_fifo_wq); in most_resume_enqueue()
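most_stop_enqueue() and most_resume_enqueue() bracket phases in which an HDM cannot accept buffers; a hypothetical use around a hardware resync (my_hw_resync() is a placeholder):

	most_stop_enqueue(iface, id);	/* sets enqueue_halt under nq_mutex */
	my_hw_resync(dev, id);		/* hypothetical hardware fixup */
	most_resume_enqueue(iface, id);	/* clears the halt and wakes hdm_fifo_wq */
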
1452 static int __init most_init(void) in most_init()
1454 int err; in most_init()